1/*
2 * Copyright (C) 2013-2021 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "FTLLowerDFGToB3.h"
28
29#if ENABLE(FTL_JIT)
30
31#include "AirCode.h"
32#include "AllowMacroScratchRegisterUsage.h"
33#include "AllowMacroScratchRegisterUsageIf.h"
34#include "AtomicsObject.h"
35#include "B3CheckValue.h"
36#include "B3PatchpointValue.h"
37#include "B3SlotBaseValue.h"
38#include "B3StackmapGenerationParams.h"
39#include "B3ValueInlines.h"
40#include "ButterflyInlines.h"
41#include "CallFrameShuffler.h"
42#include "DFGAbstractInterpreterInlines.h"
43#include "DFGCapabilities.h"
44#include "DFGClobberize.h"
45#include "DFGDoesGC.h"
46#include "DFGDominators.h"
47#include "DFGInPlaceAbstractState.h"
48#include "DFGLivenessAnalysisPhase.h"
49#include "DFGMayExit.h"
50#include "DFGOSRAvailabilityAnalysisPhase.h"
51#include "DFGOSRExitFuzz.h"
52#include "DirectArguments.h"
53#include "FTLAbstractHeapRepository.h"
54#include "FTLExceptionTarget.h"
55#include "FTLForOSREntryJITCode.h"
56#include "FTLFormattedValue.h"
57#include "FTLLazySlowPathCall.h"
58#include "FTLLoweredNodeValue.h"
59#include "FTLOperations.h"
60#include "FTLOutput.h"
61#include "FTLPatchpointExceptionHandle.h"
62#include "FTLSnippetParams.h"
63#include "FTLThunks.h"
64#include "FTLWeightedTarget.h"
65#include "JITAddGenerator.h"
66#include "JITBitAndGenerator.h"
67#include "JITBitOrGenerator.h"
68#include "JITBitXorGenerator.h"
69#include "JITDivGenerator.h"
70#include "JITInlineCacheGenerator.h"
71#include "JITLeftShiftGenerator.h"
72#include "JITMathIC.h"
73#include "JITMulGenerator.h"
74#include "JITRightShiftGenerator.h"
75#include "JITSubGenerator.h"
76#include "JSArrayIterator.h"
77#include "JSAsyncFunction.h"
78#include "JSAsyncGenerator.h"
79#include "JSAsyncGeneratorFunction.h"
80#include "JSBoundFunction.h"
81#include "JSCInlines.h"
82#include "JSGenerator.h"
83#include "JSGeneratorFunction.h"
84#include "JSImmutableButterfly.h"
85#include "JSInternalPromise.h"
86#include "JSLexicalEnvironment.h"
87#include "JSMapIterator.h"
88#include "JSSetIterator.h"
89#include "LLIntThunks.h"
90#include "OperandsInlines.h"
91#include "ProbeContext.h"
92#include "RegExpObject.h"
93#include "ScratchRegisterAllocator.h"
94#include "SetupVarargsFrame.h"
95#include "ShadowChicken.h"
96#include "StructureStubInfo.h"
97#include "SuperSampler.h"
98#include "ThunkGenerators.h"
99#include "VirtualRegister.h"
100#include <atomic>
101#include <wtf/Box.h>
102#include <wtf/RecursableLambda.h>
103#include <wtf/StdUnorderedSet.h>
104
// This file redefines RELEASE_ASSERT so that a failed check always reports the
// file, line, function, and assertion text (via WTFReportAssertionFailure)
// before crashing.
// NOTE(review): presumably this is to guarantee full diagnostic output from
// FTL lowering in every build configuration -- confirm against WTF's default
// RELEASE_ASSERT definition.
#undef RELEASE_ASSERT
#define RELEASE_ASSERT(assertion) do { \
    if (!(assertion)) { \
        WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
        CRASH(); \
    } \
} while (0)
112
113namespace JSC { namespace FTL {
114
115using namespace B3;
116using namespace DFG;
117
118namespace {
119
120std::atomic<int> compileCounter;
121
122#if ASSERT_ENABLED
// Slow-path operation invoked from FTL-generated code when control reaches a
// point that lowering believed to be unreachable. Logs the code block, basic
// block, and (when known) node, then crashes. Only compiled in ASSERT_ENABLED
// builds (see the surrounding #if).
static NO_RETURN_DUE_TO_CRASH JSC_DECLARE_JIT_OPERATION(ftlUnreachable, void, (CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex));
JSC_DEFINE_JIT_OPERATION_WITH_ATTRIBUTES(ftlUnreachable, NO_RETURN_DUE_TO_CRASH, void, (CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex))
{
    dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex);
    // nodeIndex == UINT_MAX means the caller did not identify a specific node.
    if (nodeIndex != UINT_MAX)
        dataLog(", node @", nodeIndex);
    dataLog(".\n");
    CRASH();
}
132#endif // ASSERT_ENABLED
133
134// Using this instead of typeCheck() helps to reduce the load on B3, by creating
135// significantly less dead code.
136#define FTL_TYPE_CHECK_WITH_EXIT_KIND(exitKind, lowValue, highValue, typesPassedThrough, failCondition) do { \
137 FormattedValue _ftc_lowValue = (lowValue); \
138 Edge _ftc_highValue = (highValue); \
139 SpeculatedType _ftc_typesPassedThrough = (typesPassedThrough); \
140 if (!m_interpreter.needsTypeCheck(_ftc_highValue, _ftc_typesPassedThrough)) \
141 break; \
142 typeCheck(_ftc_lowValue, _ftc_highValue, _ftc_typesPassedThrough, (failCondition), exitKind); \
143 } while (false)
144
145#define FTL_TYPE_CHECK(lowValue, highValue, typesPassedThrough, failCondition) \
146 FTL_TYPE_CHECK_WITH_EXIT_KIND(BadType, lowValue, highValue, typesPassedThrough, failCondition)
147
148class LowerDFGToB3 {
149 WTF_MAKE_NONCOPYABLE(LowerDFGToB3);
150public:
    // Builds the lowering context for one FTL compile from the shared State.
    // When abstract-interpreter-state validation is enabled, also computes
    // m_liveInToNode: for every node, the set of nodes live immediately before
    // it, derived by walking each block backwards from its live-at-tail set.
    LowerDFGToB3(State& state)
        : m_graph(state.graph)
        , m_ftlState(state)
        , m_out(state)
        , m_proc(*state.proc)
        , m_availabilityCalculator(m_graph)
        , m_state(state.graph)
        , m_interpreter(state.graph, m_state)
    {
        if (Options::validateAbstractInterpreterState()) {
            performLivenessAnalysis(m_graph);

            // We only use node liveness here, not combined liveness, as we only track
            // AI state for live nodes.
            for (DFG::BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                NodeSet live;

                // Seed with the nodes live at the block's tail; only Primary
                // projections are tracked.
                for (NodeFlowProjection node : block->ssa->liveAtTail) {
                    if (node.kind() == NodeFlowProjection::Primary)
                        live.addVoid(node.node());
                }

                // Walk backwards: a node's own result dies above it, and its
                // children become live. Each step records a snapshot (copy) of
                // the live set for that node.
                for (unsigned i = block->size(); i--; ) {
                    Node* node = block->at(i);
                    live.remove(node);
                    m_graph.doToChildren(node, [&] (Edge child) {
                        live.addVoid(child.node());
                    });
                    m_liveInToNode.add(node, live);
                }
            }
        }
    }
184
    // Entry point: translates the whole DFG graph into B3 IR. Sets up the
    // procedure's entrypoints (including op_catch OSR entrypoints), emits the
    // stack-overflow check, speculates on argument types at the primary
    // entrypoint, lowers every block reachable by CFA, and finally decorates
    // abstract-heap ranges and reorders the B3 blocks.
    void lower()
    {
        State* state = &m_ftlState;

        // The name is only consumed by verbose-compilation dumps; make it
        // unique per compile via the global counter.
        CString name;
        if (verboseCompilationEnabled()) {
            name = toCString(
                "jsBody_", ++compileCounter, "_", codeBlock()->inferredName(),
                "_", codeBlock()->hash());
        } else
            name = "jsBody";

        {
            m_proc.setNumEntrypoints(m_graph.m_numberOfEntrypoints);
            CodeBlock* codeBlock = m_graph.m_codeBlock;

            // Prologue shared by every catch entrypoint: establish the stack
            // pointer, save callee-saves, and store the CodeBlock into the
            // call frame header.
            Ref<B3::Air::PrologueGenerator> catchPrologueGenerator = createSharedTask<B3::Air::PrologueGeneratorFunction>(
                [codeBlock] (CCallHelpers& jit, B3::Air::Code& code) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);
                    jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

                    jit.emitSave(code.calleeSaveRegisterAtOffsetList());
                    jit.emitPutToCallFrameHeader(codeBlock, VirtualRegister(CallFrameSlot::codeBlock));
                });

            // Entrypoint 0 is the normal entry and keeps the default prologue;
            // every catch entrypoint gets the generator above.
            for (unsigned catchEntrypointIndex : m_graph.m_entrypointIndexToCatchBytecodeIndex.keys()) {
                RELEASE_ASSERT(catchEntrypointIndex != 0);
                m_proc.code().setPrologueForEntrypoint(catchEntrypointIndex, catchPrologueGenerator.copyRef());
            }

            if (m_graph.m_maxLocalsForCatchOSREntry) {
                uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
                m_ftlState.jitCode->common.catchOSREntryBuffer = m_graph.m_vm.scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
            }
        }

        m_graph.ensureSSADominators();

        if (verboseCompilationEnabled())
            dataLog("Function ready, beginning lowering.\n");

        m_out.initialize(m_heaps);

        // We use prologue frequency for all of the initialization code.
        m_out.setFrequency(1);

        bool hasMultipleEntrypoints = m_graph.m_numberOfEntrypoints > 1;

        LBasicBlock prologue = m_out.newBlock();
        LBasicBlock callEntrypointArgumentSpeculations = hasMultipleEntrypoints ? m_out.newBlock() : nullptr;
        m_handleExceptions = m_out.newBlock();

        // Create one low (B3) block per high (DFG) block, carrying the DFG's
        // execution-count estimate over as the B3 block frequency.
        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            m_highBlock = m_graph.block(blockIndex);
            if (!m_highBlock)
                continue;
            m_out.setFrequency(m_highBlock->executionCount);
            m_blocks.add(m_highBlock, m_out.newBlock());
        }

        // Back to prologue frequency for any blocks that get sneakily created in the initialization code.
        m_out.setFrequency(1);

        m_out.appendTo(prologue, hasMultipleEntrypoints ? callEntrypointArgumentSpeculations : m_handleExceptions);
        m_out.initializeConstants(m_proc, prologue);
        createPhiVariables();

        // Reserve one pinned stack slab sized for all machine locals;
        // m_captured points just past its end. NOTE(review): presumably this
        // backs OSR exit / materialization state (state->capturedValue) --
        // confirm against FTLState users.
        size_t sizeOfCaptured = sizeof(JSValue) * m_graph.m_nextMachineLocal;
        B3::SlotBaseValue* capturedBase = m_out.lockedStackSlot(sizeOfCaptured);
        m_captured = m_out.add(capturedBase, m_out.constIntPtr(sizeOfCaptured));
        state->capturedValue = capturedBase->slot();

        auto preOrder = m_graph.blocksInPreOrder();

        VM* vm = &this->vm();

        m_callFrame = m_out.framePointer();
        m_vmValue = m_out.constIntPtr(vm);
        m_numberTag = m_out.constInt64(JSValue::NumberTag);
        m_notCellMask = m_out.constInt64(JSValue::NotCellMask);
        if (Options::validateDFGClobberize())
            m_out.store32As8(m_out.int32Zero, m_out.absolute(reinterpret_cast<char*>(vm) + OBJECT_OFFSETOF(VM, didEnterVM)));

        // Make sure that B3 knows that we really care about the mask registers. This forces the
        // constants to be materialized in registers.
        m_proc.addFastConstant(m_numberTag->key());
        m_proc.addFastConstant(m_notCellMask->key());

        // We don't want the CodeBlock to have a weak pointer to itself because
        // that would cause it to always get collected.
        m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), addressFor(VirtualRegister(CallFrameSlot::codeBlock)));

        // Stack Overflow Check.
        unsigned exitFrameSize = m_graph.requiredRegisterCountForExit() * sizeof(Register);
        MacroAssembler::AbsoluteAddress addressOfStackLimit(vm->addressOfSoftStackLimit());
        PatchpointValue* stackOverflowHandler = m_out.patchpoint(Void);
        CallSiteIndex callSiteIndex = callSiteIndexForCodeOrigin(m_ftlState, CodeOrigin(BytecodeIndex(0)));
        stackOverflowHandler->appendSomeRegister(m_callFrame);
        stackOverflowHandler->clobber(RegisterSet::macroScratchRegisters());
        stackOverflowHandler->numGPScratchRegisters = 1;
        stackOverflowHandler->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                GPRReg fp = params[0].gpr();
                GPRReg scratch = params.gpScratch(0);

                // Check against the larger of the FTL frame and the frame we
                // would need if we OSR-exited from here.
                unsigned ftlFrameSize = params.proc().frameSize();
                unsigned maxFrameSize = std::max(exitFrameSize, ftlFrameSize);

                jit.addPtr(MacroAssembler::TrustedImm32(-maxFrameSize), fp, scratch);
                MacroAssembler::JumpList stackOverflow;
                // If the frame exceeds the reserved zone, fp - maxFrameSize can
                // wrap; detect that arithmetic overflow explicitly.
                if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
                    stackOverflow.append(jit.branchPtr(MacroAssembler::Above, scratch, fp));
                stackOverflow.append(jit.branchPtr(MacroAssembler::Above, addressOfStackLimit, scratch));

                // Overflow path is emitted out-of-line: restore callee-saves,
                // record the call site, throw StackOverflowError, then unwind
                // to the caller frame's handler.
                params.addLatePath([=] (CCallHelpers& jit) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);

                    stackOverflow.link(&jit);

                    // FIXME: We would not have to do this if the stack check was part of the Air
                    // prologue. Then, we would know that there is no way for the callee-saves to
                    // get clobbered.
                    // https://bugs.webkit.org/show_bug.cgi?id=172456
                    jit.emitRestore(params.proc().calleeSaveRegisterAtOffsetList());

                    jit.store32(
                        MacroAssembler::TrustedImm32(callSiteIndex.bits()),
                        CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCountIncludingThis)));
                    jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);

                    jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::argumentGPR0);
                    jit.prepareCallOperation(*vm);
                    CCallHelpers::Call throwCall = jit.call(OperationPtrTag);

                    jit.move(CCallHelpers::TrustedImmPtr(vm), GPRInfo::argumentGPR0);
                    jit.prepareCallOperation(*vm);
                    CCallHelpers::Call lookupExceptionHandlerCall = jit.call(OperationPtrTag);
                    jit.jumpToExceptionHandler(*vm);

                    jit.addLinkTask(
                        [=] (LinkBuffer& linkBuffer) {
                            linkBuffer.link(throwCall, FunctionPtr<OperationPtrTag>(operationThrowStackOverflowError));
                            linkBuffer.link(lookupExceptionHandlerCall, FunctionPtr<OperationPtrTag>(operationLookupExceptionHandlerFromCallerFrame));
                        });
                });
            });

        LBasicBlock firstDFGBasicBlock = lowBlock(m_graph.block(0));

        {
            if (hasMultipleEntrypoints) {
                Vector<LBasicBlock> successors(m_graph.m_numberOfEntrypoints);
                successors[0] = callEntrypointArgumentSpeculations;
                for (unsigned i = 1; i < m_graph.m_numberOfEntrypoints; ++i) {
                    // Currently, the only other entrypoint is an op_catch entrypoint.
                    // We do OSR entry at op_catch, and we prove argument formats before
                    // jumping to FTL code, so we don't need to check argument types here
                    // for these entrypoints.
                    successors[i] = firstDFGBasicBlock;
                }

                m_out.entrySwitch(successors);
                m_out.appendTo(callEntrypointArgumentSpeculations, m_handleExceptions);
            }

            m_node = nullptr;
            m_nodeIndexInGraph = 0;
            m_origin = NodeOrigin(CodeOrigin(BytecodeIndex(0)), CodeOrigin(BytecodeIndex(0)), true);

            // Check Arguments.
            availabilityMap().clear();
            availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0, 0);
            for (unsigned i = codeBlock()->numParameters(); i--;) {
                availabilityMap().m_locals.argument(i) =
                    Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgumentIncludingThis(i)));
            }

            if (m_graph.m_plan.mode() == JITCompilationMode::FTLForOSREntry) {
                // For OSR entry we record the argument flush formats on the JIT
                // code rather than emitting type checks here.
                auto* jitCode = m_ftlState.jitCode->ftlForOSREntry();
                FixedVector<DFG::FlushFormat> argumentFlushFormats(codeBlock()->numParameters());
                for (unsigned i = 0; i < codeBlock()->numParameters(); ++i)
                    argumentFlushFormats[i] = m_graph.m_argumentFormats[0][i];
                jitCode->setArgumentFlushFormats(WTFMove(argumentFlushFormats));
            } else {
                // Speculate that each argument matches its proven flush format;
                // a failure exits with BadType, with the argument's value
                // profile attached.
                for (unsigned i = codeBlock()->numParameters(); i--;) {
                    MethodOfGettingAValueProfile profile(&m_graph.m_profiledBlock->valueProfileForArgument(i));
                    VirtualRegister operand = virtualRegisterForArgumentIncludingThis(i);
                    LValue jsValue = m_out.load64(addressFor(operand));

                    switch (m_graph.m_argumentFormats[0][i]) {
                    case FlushedInt32:
                        speculate(BadType, jsValueValue(jsValue), profile, isNotInt32(jsValue));
                        break;
                    case FlushedBoolean:
                        speculate(BadType, jsValueValue(jsValue), profile, isNotBoolean(jsValue));
                        break;
                    case FlushedCell:
                        speculate(BadType, jsValueValue(jsValue), profile, isNotCell(jsValue));
                        break;
                    case FlushedJSValue:
                        break;
                    default:
                        DFG_CRASH(m_graph, nullptr, "Bad flush format for argument");
                        break;
                    }
                }
            }

            m_out.jump(firstDFGBasicBlock);
        }


        // Shared exception-handling continuation: an unconditional jump to the
        // exception handler label stored in State, resolved at link time.
        m_out.appendTo(m_handleExceptions, firstDFGBasicBlock);
        Box<CCallHelpers::Label> exceptionHandler = state->exceptionHandler;
        m_out.patchpoint(Void)->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
                CCallHelpers::Jump jump = jit.jump();
                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        linkBuffer.link(jump, linkBuffer.locationOf<ExceptionHandlerPtrTag>(*exceptionHandler));
                    });
            });
        m_out.unreachable();

        for (DFG::BasicBlock* block : preOrder)
            compileBlock(block);

        // Make sure everything is decorated. This does a bunch of deferred decorating. This has
        // to happen last because our abstract heaps are generated lazily. They have to be
        // generated lazily because we have an infinite number of numbered, indexed, and
        // absolute heaps. We only become aware of the ones we actually mention while lowering.
        m_heaps.computeRangesAndDecorateInstructions();

        // We create all Phi's up front, but we may then decide not to compile the basic block
        // that would have contained one of them. So this creates orphans, which triggers B3
        // validation failures. Calling this fixes the issue.
        //
        // Note that you should avoid the temptation to make this call conditional upon
        // validation being enabled. B3 makes no guarantees of any kind of correctness when
        // dealing with IR that would have failed validation. For example, it would be valid to
        // write a B3 phase that so aggressively assumes the lack of orphans that it would crash
        // if any orphans were around. We might even have such phases already.
        m_proc.deleteOrphans();

        // We put the blocks into the B3 procedure in a super weird order. Now we reorder them.
        m_out.applyBlockOrder();
    }
433
434private:
435
436 void createPhiVariables()
437 {
438 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
439 DFG::BasicBlock* block = m_graph.block(blockIndex);
440 if (!block)
441 continue;
442 for (unsigned nodeIndex = block->size(); nodeIndex--;) {
443 Node* node = block->at(nodeIndex);
444 if (node->op() != DFG::Phi)
445 continue;
446 LType type;
447 switch (node->flags() & NodeResultMask) {
448 case NodeResultDouble:
449 type = Double;
450 break;
451 case NodeResultInt32:
452 type = Int32;
453 break;
454 case NodeResultInt52:
455 type = Int64;
456 break;
457 case NodeResultBoolean:
458 type = Int32;
459 break;
460 case NodeResultJS:
461 type = Int64;
462 break;
463 default:
464 DFG_CRASH(m_graph, node, "Bad Phi node result type");
465 break;
466 }
467 m_phis.add(node, m_proc.add<Value>(B3::Phi, type, Origin(node)));
468 }
469 }
470 }
471
    // Lowers one DFG basic block into its pre-created B3 block: bails (emits a
    // crash) if CFA never reached the block, optionally emits clobberize
    // validation, then lowers each node in order until a node terminates the
    // block or invalidates the abstract state.
    void compileBlock(DFG::BasicBlock* block)
    {
        if (!block)
            return;

        if (verboseCompilationEnabled())
            dataLog("Compiling block ", *block, "\n");

        m_highBlock = block;

        // Make sure that any blocks created while lowering code in the high block have the frequency of
        // the high block. This is appropriate because B3 doesn't need precise frequencies. It just needs
        // something roughly approximate for things like register allocation.
        m_out.setFrequency(m_highBlock->executionCount);

        LBasicBlock lowBlock = m_blocks.get(m_highBlock);

        // Find the next non-null high block (DFG block arrays can have holes).
        m_nextHighBlock = nullptr;
        for (BlockIndex nextBlockIndex = m_highBlock->index + 1; nextBlockIndex < m_graph.numBlocks(); ++nextBlockIndex) {
            m_nextHighBlock = m_graph.block(nextBlockIndex);
            if (m_nextHighBlock)
                break;
        }
        m_nextLowBlock = m_nextHighBlock ? m_blocks.get(m_nextHighBlock) : nullptr;

        // All of this effort to find the next block gives us the ability to keep the
        // generated IR in roughly program order. This ought not affect the performance
        // of the generated code (since we expect B3 to reorder things) but it will
        // make IR dumps easier to read.
        m_out.appendTo(lowBlock, m_nextLowBlock);

        if (Options::ftlCrashes())
            m_out.trap();

        // CFA proved this block unreachable; emit a crash instead of lowering
        // nodes whose abstract state we never computed.
        if (!m_highBlock->cfaHasVisited) {
            if (verboseCompilationEnabled())
                dataLog("Bailing because CFA didn't reach.\n");
            crash(m_highBlock, nullptr);
            return;
        }

        m_aiCheckedNodes.clear();

        m_availabilityCalculator.beginBlock(m_highBlock);

        m_state.reset();
        m_state.beginBasicBlock(m_highBlock);

        if (Options::validateDFGClobberize()) {
            // clobberedWorld starts true for blocks whose predecessors we can't
            // reason about (entry, OSR target, catch entrypoint); otherwise it
            // becomes true if any predecessor's terminal clobbers the world.
            bool clobberedWorld = m_highBlock->predecessors.isEmpty() || m_highBlock->isOSRTarget || m_highBlock->isCatchEntrypoint;
            auto validateClobberize = [&] () {
                clobberedWorld = true;
            };

            for (auto* predecessor : m_highBlock->predecessors)
                clobberize(m_graph, predecessor->terminal(), [] (auto...) { }, [] (auto...) { }, [] (auto...) { }, validateClobberize);

            // If no predecessor clobbered the world, didEnterVM must still be
            // clear on entry; breakpoint if it is set. Otherwise reset the flag.
            if (!clobberedWorld) {
                LValue didNotEnterVM = m_out.notZero32(m_out.load8ZeroExt32(m_out.absolute(&vm().didEnterVM)));
                auto* check = m_out.speculate(didNotEnterVM);
                check->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) {
                    jit.breakpoint();
                });
            } else
                m_out.store(m_out.int32Zero, m_out.absolute(&vm().didEnterVM));
        }

        // compileNode() returns false when the block is done (terminal lowered
        // or abstract state became invalid).
        for (unsigned nodeIndex = 0; nodeIndex < m_highBlock->size(); ++nodeIndex) {
            if (!compileNode(nodeIndex))
                break;
        }
    }
544
545 void safelyInvalidateAfterTermination()
546 {
547 if (verboseCompilationEnabled())
548 dataLog("Bailing.\n");
549 crash();
550
551 // Invalidate dominated blocks. Under normal circumstances we would expect
552 // them to be invalidated already. But you can have the CFA become more
553 // precise over time because the structures of objects change on the main
554 // thread. Failing to do this would result in weird crashes due to a value
555 // being used but not defined. Race conditions FTW!
556 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
557 DFG::BasicBlock* target = m_graph.block(blockIndex);
558 if (!target)
559 continue;
560 if (m_graph.m_ssaDominators->dominates(m_highBlock, target)) {
561 if (verboseCompilationEnabled())
562 dataLog("Block ", *target, " will bail also.\n");
563 target->cfaHasVisited = false;
564 }
565 }
566 }
567
    // Debug-only validation (Options::validateAbstractInterpreterState): before
    // executing `node`, emit runtime probes that compare the actual value of
    // each node live at this point against what the abstract interpreter
    // predicted, crashing with a full graph dump on mismatch.
    void validateAIState(Node* node)
    {
        // Dump the graph once, lazily, so the probe's failure message can
        // include it.
        if (!m_graphDump) {
            StringPrintStream out;
            m_graph.dump(out);
            m_graphDump = out.tryToString().value_or("<out of memory while dumping graph>"_s);
        }

        // Ops that don't produce a value worth checking (hints, constants,
        // stack traffic, exit bookkeeping) are skipped entirely.
        switch (node->op()) {
        case MovHint:
        case JSConstant:
        case LazyJSConstant:
        case DoubleConstant:
        case Int52Constant:
        case GetStack:
        case PutStack:
        case KillStack:
        case ExitOK:
            return;
        default:
            break;
        }

        // Before we execute node.
        NodeSet& live = m_liveInToNode.find(node)->value;
        unsigned highParentIndex = node->index();
        {
            // Sample: validate this node only with probability
            // validateAbstractInterpreterStateProbability(), chosen
            // deterministically from the hash of the node index.
            if (intHash(highParentIndex) >= (static_cast<double>(std::numeric_limits<unsigned>::max()) + 1) * Options::validateAbstractInterpreterStateProbability())
                return;
        }

        for (Node* node : live) {
            // Phantom allocations and bounds-check nodes have no materialized
            // value to compare.
            if (node->isPhantomAllocation())
                continue;

            if (node->op() == AssertInBounds)
                continue;
            if (node->op() == CheckInBounds)
                continue;

            AbstractValue value = m_interpreter.forNode(node);
            {
                // Skip re-checking a node whose AI value hasn't changed since
                // the last check -- except when it may be a cell.
                // NOTE(review): presumably cells are re-checked because their
                // state can change between program points -- confirm.
                auto iter = m_aiCheckedNodes.find(node);
                if (iter != m_aiCheckedNodes.end()) {
                    AbstractValue checkedValue = iter->value;
                    if (checkedValue == value) {
                        if (!(value.m_type & SpecCell))
                            continue;
                    }
                }
                m_aiCheckedNodes.set(node, value);
            }

            // Lower the live node's value in whichever representation it has.
            FlushFormat flushFormat;
            LValue input;
            if (node->hasJSResult()) {
                input = lowJSValue(Edge(node, UntypedUse));
                flushFormat = FlushedJSValue;
            } else if (node->hasDoubleResult()) {
                input = lowDouble(Edge(node, DoubleRepUse));
                flushFormat = FlushedDouble;
            } else if (node->hasInt52Result()) {
                input = strictInt52ToJSValue(lowStrictInt52(Edge(node, Int52RepUse)));
                flushFormat = FlushedInt52;
            } else
                continue;

            unsigned highChildIndex = node->index();

            String graphDump = m_graphDump;

            // Emit a patchpoint that probes the value at runtime. The effects
            // are set so B3 won't move or eliminate it.
            PatchpointValue* patchpoint = m_out.patchpoint(Void);
            patchpoint->effects = Effects::none();
            patchpoint->effects.reads = HeapRange::top();
            patchpoint->effects.writesLocalState = true;
            patchpoint->appendSomeRegister(input);
            patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                GPRReg reg = InvalidGPRReg;
                FPRReg fpReg = InvalidFPRReg;
                if (flushFormat == FlushedDouble)
                    fpReg = params[0].fpr();
                else
                    reg = params[0].gpr();
                jit.probeDebug([=] (Probe::Context& context) {
                    JSValue input;
                    double doubleInput;

                    auto dumpAndCrash = [&] {
                        dataLogLn("Validation failed at node: @", highParentIndex);
                        dataLogLn("Failed validating live value: @", highChildIndex);
                        dataLogLn();
                        dataLogLn("Expected AI value = ", value);
                        if (flushFormat != FlushedDouble)
                            dataLogLn("Unexpected value = ", input);
                        else
                            dataLogLn("Unexpected double value = ", doubleInput);
                        dataLogLn();
                        dataLogLn(graphDump);
                        CRASH();
                    };

                    if (flushFormat == FlushedDouble) {
                        // Classify the runtime double (number, pure NaN, or
                        // impure NaN) and check AI allowed that type.
                        doubleInput = context.fpr(fpReg);
                        SpeculatedType type;
                        if (!std::isnan(doubleInput))
                            type = speculationFromValue(jsDoubleNumber(doubleInput));
                        else if (isImpureNaN(doubleInput))
                            type = SpecDoubleImpureNaN;
                        else
                            type = SpecDoublePureNaN;

                        if (!value.couldBeType(type))
                            dumpAndCrash();
                    } else {
                        // Int52 values were boxed above; unbox to a double
                        // number before validating against the AI value.
                        input = JSValue::decode(context.gpr(reg));
                        if (flushFormat == FlushedInt52) {
                            RELEASE_ASSERT(input.isAnyInt());
                            input = jsDoubleNumber(input.asAnyInt());
                        }
                        if (!value.validateOSREntryValue(input, flushFormat))
                            dumpAndCrash();
                    }

                });
            });
        }
    }
695
696 bool compileNode(unsigned nodeIndex)
697 {
698 if (!m_state.isValid()) {
699 safelyInvalidateAfterTermination();
700 return false;
701 }
702
703 m_node = m_highBlock->at(nodeIndex);
704 m_nodeIndexInGraph = m_node->index();
705 m_origin = m_node->origin;
706 m_out.setOrigin(m_node);
707
708 if (verboseCompilationEnabled())
709 dataLog("Lowering ", m_node, "\n");
710
711 m_interpreter.startExecuting();
712 m_interpreter.executeKnownEdgeTypes(m_node);
713
714 if (Options::validateAbstractInterpreterState())
715 validateAIState(m_node);
716
717 if constexpr (validateDFGDoesGC) {
718 if (Options::validateDoesGC()) {
719 bool expectDoesGC = doesGC(m_graph, m_node);
720 m_out.store(m_out.constInt32(DoesGCCheck::encode(expectDoesGC, m_node->index(), m_node->op())), m_out.absolute(vm().heap.addressOfDoesGC()));
721 }
722 }
723
724 switch (m_node->op()) {
725 case DFG::Upsilon:
726 compileUpsilon();
727 break;
728 case DFG::Phi:
729 compilePhi();
730 break;
731 case JSConstant:
732 break;
733 case DoubleConstant:
734 compileDoubleConstant();
735 break;
736 case Int52Constant:
737 compileInt52Constant();
738 break;
739 case LazyJSConstant:
740 compileLazyJSConstant();
741 break;
742 case DoubleRep:
743 compileDoubleRep();
744 break;
745 case DoubleAsInt32:
746 compileDoubleAsInt32();
747 break;
748 case DFG::ValueRep:
749 compileValueRep();
750 break;
751 case Int52Rep:
752 compileInt52Rep();
753 break;
754 case ValueToInt32:
755 compileValueToInt32();
756 break;
757 case BooleanToNumber:
758 compileBooleanToNumber();
759 break;
760 case ExtractOSREntryLocal:
761 compileExtractOSREntryLocal();
762 break;
763 case ExtractCatchLocal:
764 compileExtractCatchLocal();
765 break;
766 case ClearCatchLocals:
767 compileClearCatchLocals();
768 break;
769 case GetStack:
770 compileGetStack();
771 break;
772 case PutStack:
773 compilePutStack();
774 break;
775 case DFG::Check:
776 case CheckVarargs:
777 compileNoOp();
778 break;
779 case ToObject:
780 case CallObjectConstructor:
781 compileToObjectOrCallObjectConstructor();
782 break;
783 case ToThis:
784 compileToThis();
785 break;
786 case Inc:
787 case Dec:
788 compileIncOrDec();
789 break;
790 case ValueNegate:
791 compileValueNegate();
792 break;
793 case ValueAdd:
794 compileValueAdd();
795 break;
796 case ValueSub:
797 compileValueSub();
798 break;
799 case ValueMul:
800 compileValueMul();
801 break;
802 case StrCat:
803 compileStrCat();
804 break;
805 case ArithAdd:
806 case ArithSub:
807 compileArithAddOrSub();
808 break;
809 case ArithClz32:
810 compileArithClz32();
811 break;
812 case ArithMul:
813 compileArithMul();
814 break;
815 case ValueDiv:
816 compileValueDiv();
817 break;
818 case ArithDiv:
819 compileArithDiv();
820 break;
821 case ValueMod:
822 compileValueMod();
823 break;
824 case ArithMod:
825 compileArithMod();
826 break;
827 case ArithMin:
828 case ArithMax:
829 compileArithMinOrMax();
830 break;
831 case ArithAbs:
832 compileArithAbs();
833 break;
834 case ValuePow:
835 compileValuePow();
836 break;
837 case ArithPow:
838 compileArithPow();
839 break;
840 case ArithRandom:
841 compileArithRandom();
842 break;
843 case ArithRound:
844 compileArithRound();
845 break;
846 case ArithFloor:
847 compileArithFloor();
848 break;
849 case ArithCeil:
850 compileArithCeil();
851 break;
852 case ArithTrunc:
853 compileArithTrunc();
854 break;
855 case ArithSqrt:
856 compileArithSqrt();
857 break;
858 case ArithFRound:
859 compileArithFRound();
860 break;
861 case ArithNegate:
862 compileArithNegate();
863 break;
864 case ArithUnary:
865 compileArithUnary();
866 break;
867 case ValueBitNot:
868 compileValueBitNot();
869 break;
870 case ArithBitNot:
871 compileArithBitNot();
872 break;
873 case ValueBitAnd:
874 compileValueBitAnd();
875 break;
876 case ArithBitAnd:
877 compileArithBitAnd();
878 break;
879 case ValueBitOr:
880 compileValueBitOr();
881 break;
882 case ArithBitOr:
883 compileArithBitOr();
884 break;
885 case ArithBitXor:
886 compileArithBitXor();
887 break;
888 case ValueBitXor:
889 compileValueBitXor();
890 break;
891 case ValueBitRShift:
892 compileValueBitRShift();
893 break;
894 case ArithBitRShift:
895 compileArithBitRShift();
896 break;
897 case ArithBitLShift:
898 compileArithBitLShift();
899 break;
900 case ValueBitLShift:
901 compileValueBitLShift();
902 break;
903 case BitURShift:
904 compileBitURShift();
905 break;
906 case UInt32ToNumber:
907 compileUInt32ToNumber();
908 break;
909 case CheckStructure:
910 compileCheckStructure();
911 break;
912 case CheckStructureOrEmpty:
913 compileCheckStructureOrEmpty();
914 break;
915 case CheckIsConstant:
916 compileCheckIsConstant();
917 break;
918 case CheckNotEmpty:
919 compileCheckNotEmpty();
920 break;
921 case AssertNotEmpty:
922 compileAssertNotEmpty();
923 break;
924 case CheckBadValue:
925 compileCheckBadValue();
926 break;
927 case CheckIdent:
928 compileCheckIdent();
929 break;
930 case GetExecutable:
931 compileGetExecutable();
932 break;
933 case Arrayify:
934 case ArrayifyToStructure:
935 compileArrayify();
936 break;
937 case PutStructure:
938 compilePutStructure();
939 break;
940 case TryGetById:
941 compileGetById(AccessType::TryGetById);
942 break;
943 case GetById:
944 case GetByIdFlush:
945 compileGetById(AccessType::GetById);
946 break;
947 case GetByIdWithThis:
948 compileGetByIdWithThis();
949 break;
950 case GetByIdDirect:
951 case GetByIdDirectFlush:
952 compileGetById(AccessType::GetByIdDirect);
953 break;
954 case GetPrivateName:
955 compileGetPrivateName();
956 break;
957 case GetPrivateNameById:
958 compileGetPrivateNameById();
959 break;
960 case InById:
961 compileInById();
962 break;
963 case InByVal:
964 compileInByVal();
965 break;
966 case HasPrivateName:
967 compileHasPrivateName();
968 break;
969 case HasPrivateBrand:
970 compileHasPrivateBrand();
971 break;
972 case CheckPrivateBrand:
973 compileCheckPrivateBrand();
974 break;
975 case SetPrivateBrand:
976 compileSetPrivateBrand();
977 break;
978 case HasOwnProperty:
979 compileHasOwnProperty();
980 break;
981 case PutById:
982 case PutByIdDirect:
983 case PutByIdFlush:
984 compilePutById();
985 break;
986 case PutByIdWithThis:
987 compilePutByIdWithThis();
988 break;
989 case PutGetterById:
990 case PutSetterById:
991 compilePutAccessorById();
992 break;
993 case PutGetterSetterById:
994 compilePutGetterSetterById();
995 break;
996 case PutGetterByVal:
997 case PutSetterByVal:
998 compilePutAccessorByVal();
999 break;
1000 case DeleteById:
1001 compileDeleteById();
1002 break;
1003 case DeleteByVal:
1004 compileDeleteByVal();
1005 break;
1006 case GetButterfly:
1007 compileGetButterfly();
1008 break;
1009 case ConstantStoragePointer:
1010 compileConstantStoragePointer();
1011 break;
1012 case GetIndexedPropertyStorage:
1013 compileGetIndexedPropertyStorage();
1014 break;
1015 case CheckArray:
1016 compileCheckArray();
1017 break;
1018 case CheckArrayOrEmpty:
1019 compileCheckArrayOrEmpty();
1020 break;
1021 case CheckDetached:
1022 compileCheckDetached();
1023 break;
1024 case GetArrayLength:
1025 compileGetArrayLength();
1026 break;
1027 case GetVectorLength:
1028 compileGetVectorLength();
1029 break;
1030 case AssertInBounds:
1031 compileAssertInBounds();
1032 break;
1033 case CheckInBounds:
1034 compileCheckInBounds();
1035 break;
1036 case GetByVal:
1037 compileGetByVal();
1038 break;
1039 case GetMyArgumentByVal:
1040 case GetMyArgumentByValOutOfBounds:
1041 compileGetMyArgumentByVal();
1042 break;
1043 case GetByValWithThis:
1044 compileGetByValWithThis();
1045 break;
1046 case PutByVal:
1047 case PutByValAlias:
1048 case PutByValDirect:
1049 compilePutByVal();
1050 break;
1051 case PutByValWithThis:
1052 compilePutByValWithThis();
1053 break;
1054 case PutPrivateName:
1055 compilePutPrivateName();
1056 break;
1057 case PutPrivateNameById:
1058 compilePutPrivateNameById();
1059 break;
1060 case AtomicsAdd:
1061 case AtomicsAnd:
1062 case AtomicsCompareExchange:
1063 case AtomicsExchange:
1064 case AtomicsLoad:
1065 case AtomicsOr:
1066 case AtomicsStore:
1067 case AtomicsSub:
1068 case AtomicsXor:
1069 compileAtomicsReadModifyWrite();
1070 break;
1071 case AtomicsIsLockFree:
1072 compileAtomicsIsLockFree();
1073 break;
1074 case DefineDataProperty:
1075 compileDefineDataProperty();
1076 break;
1077 case DefineAccessorProperty:
1078 compileDefineAccessorProperty();
1079 break;
1080 case ArrayPush:
1081 compileArrayPush();
1082 break;
1083 case ArrayPop:
1084 compileArrayPop();
1085 break;
1086 case ArraySlice:
1087 compileArraySlice();
1088 break;
1089 case ArrayIndexOf:
1090 compileArrayIndexOf();
1091 break;
1092 case CreateActivation:
1093 compileCreateActivation();
1094 break;
1095 case PushWithScope:
1096 compilePushWithScope();
1097 break;
1098 case NewFunction:
1099 case NewGeneratorFunction:
1100 case NewAsyncGeneratorFunction:
1101 case NewAsyncFunction:
1102 compileNewFunction();
1103 break;
1104 case CreateDirectArguments:
1105 compileCreateDirectArguments();
1106 break;
1107 case CreateScopedArguments:
1108 compileCreateScopedArguments();
1109 break;
1110 case CreateClonedArguments:
1111 compileCreateClonedArguments();
1112 break;
1113 case CreateArgumentsButterfly:
1114 compileCreateArgumentsButterfly();
1115 break;
1116 case ObjectCreate:
1117 compileObjectCreate();
1118 break;
1119 case ObjectKeys:
1120 case ObjectGetOwnPropertyNames:
1121 compileObjectKeysOrObjectGetOwnPropertyNames();
1122 break;
1123 case NewObject:
1124 compileNewObject();
1125 break;
1126 case NewGenerator:
1127 compileNewGenerator();
1128 break;
1129 case NewAsyncGenerator:
1130 compileNewAsyncGenerator();
1131 break;
1132 case NewInternalFieldObject:
1133 compileNewInternalFieldObject();
1134 break;
1135 case NewStringObject:
1136 compileNewStringObject();
1137 break;
1138 case NewSymbol:
1139 compileNewSymbol();
1140 break;
1141 case NewArray:
1142 compileNewArray();
1143 break;
1144 case NewArrayWithSpread:
1145 compileNewArrayWithSpread();
1146 break;
1147 case CreateThis:
1148 compileCreateThis();
1149 break;
1150 case CreatePromise:
1151 compileCreatePromise();
1152 break;
1153 case CreateGenerator:
1154 compileCreateGenerator();
1155 break;
1156 case CreateAsyncGenerator:
1157 compileCreateAsyncGenerator();
1158 break;
1159 case Spread:
1160 compileSpread();
1161 break;
1162 case NewArrayBuffer:
1163 compileNewArrayBuffer();
1164 break;
1165 case NewArrayWithSize:
1166 compileNewArrayWithSize();
1167 break;
1168 case NewTypedArray:
1169 compileNewTypedArray();
1170 break;
1171 case GetTypedArrayByteOffset:
1172 compileGetTypedArrayByteOffset();
1173 break;
1174 case GetPrototypeOf:
1175 compileGetPrototypeOf();
1176 break;
1177 case AllocatePropertyStorage:
1178 compileAllocatePropertyStorage();
1179 break;
1180 case ReallocatePropertyStorage:
1181 compileReallocatePropertyStorage();
1182 break;
1183 case NukeStructureAndSetButterfly:
1184 compileNukeStructureAndSetButterfly();
1185 break;
1186 case ToNumber:
1187 compileToNumber();
1188 break;
1189 case ToNumeric:
1190 compileToNumeric();
1191 break;
1192 case CallNumberConstructor:
1193 compileCallNumberConstructor();
1194 break;
1195 case ToString:
1196 case CallStringConstructor:
1197 case StringValueOf:
1198 compileToStringOrCallStringConstructorOrStringValueOf();
1199 break;
1200 case FunctionToString:
1201 compileFunctionToString();
1202 break;
1203 case ToPrimitive:
1204 compileToPrimitive();
1205 break;
1206 case ToPropertyKey:
1207 compileToPropertyKey();
1208 break;
1209 case MakeRope:
1210 compileMakeRope();
1211 break;
1212 case StringCharAt:
1213 compileStringCharAt();
1214 break;
1215 case StringCharCodeAt:
1216 compileStringCharCodeAt();
1217 break;
1218 case StringCodePointAt:
1219 compileStringCodePointAt();
1220 break;
1221 case StringFromCharCode:
1222 compileStringFromCharCode();
1223 break;
1224 case GetByOffset:
1225 case GetGetterSetterByOffset:
1226 compileGetByOffset();
1227 break;
1228 case GetGetter:
1229 compileGetGetter();
1230 break;
1231 case GetSetter:
1232 compileGetSetter();
1233 break;
1234 case MultiGetByOffset:
1235 compileMultiGetByOffset();
1236 break;
1237 case PutByOffset:
1238 compilePutByOffset();
1239 break;
1240 case MultiPutByOffset:
1241 compileMultiPutByOffset();
1242 break;
1243 case MultiDeleteByOffset:
1244 compileMultiDeleteByOffset();
1245 break;
1246 case MatchStructure:
1247 compileMatchStructure();
1248 break;
1249 case GetGlobalVar:
1250 case GetGlobalLexicalVariable:
1251 compileGetGlobalVariable();
1252 break;
1253 case PutGlobalVariable:
1254 compilePutGlobalVariable();
1255 break;
1256 case NotifyWrite:
1257 compileNotifyWrite();
1258 break;
1259 case GetCallee:
1260 compileGetCallee();
1261 break;
1262 case SetCallee:
1263 compileSetCallee();
1264 break;
1265 case GetArgumentCountIncludingThis:
1266 compileGetArgumentCountIncludingThis();
1267 break;
1268 case SetArgumentCountIncludingThis:
1269 compileSetArgumentCountIncludingThis();
1270 break;
1271 case GetScope:
1272 compileGetScope();
1273 break;
1274 case SkipScope:
1275 compileSkipScope();
1276 break;
1277 case GetGlobalObject:
1278 compileGetGlobalObject();
1279 break;
1280 case GetGlobalThis:
1281 compileGetGlobalThis();
1282 break;
1283 case GetClosureVar:
1284 compileGetClosureVar();
1285 break;
1286 case PutClosureVar:
1287 compilePutClosureVar();
1288 break;
1289 case GetInternalField:
1290 compileGetInternalField();
1291 break;
1292 case PutInternalField:
1293 compilePutInternalField();
1294 break;
1295 case GetFromArguments:
1296 compileGetFromArguments();
1297 break;
1298 case PutToArguments:
1299 compilePutToArguments();
1300 break;
1301 case GetArgument:
1302 compileGetArgument();
1303 break;
1304 case CompareEq:
1305 compileCompareEq();
1306 break;
1307 case CompareStrictEq:
1308 compileCompareStrictEq();
1309 break;
1310 case CompareLess:
1311 compileCompareLess();
1312 break;
1313 case CompareLessEq:
1314 compileCompareLessEq();
1315 break;
1316 case CompareGreater:
1317 compileCompareGreater();
1318 break;
1319 case CompareGreaterEq:
1320 compileCompareGreaterEq();
1321 break;
1322 case CompareBelow:
1323 compileCompareBelow();
1324 break;
1325 case CompareBelowEq:
1326 compileCompareBelowEq();
1327 break;
1328 case CompareEqPtr:
1329 compileCompareEqPtr();
1330 break;
1331 case SameValue:
1332 compileSameValue();
1333 break;
1334 case ToBoolean:
1335 compileToBoolean();
1336 break;
1337 case LogicalNot:
1338 compileLogicalNot();
1339 break;
1340 case Call:
1341 case TailCallInlinedCaller:
1342 case Construct:
1343 compileCallOrConstruct();
1344 break;
1345 case DirectCall:
1346 case DirectTailCallInlinedCaller:
1347 case DirectConstruct:
1348 case DirectTailCall:
1349 compileDirectCallOrConstruct();
1350 break;
1351 case TailCall:
1352 compileTailCall();
1353 break;
1354 case CallVarargs:
1355 case CallForwardVarargs:
1356 case TailCallVarargs:
1357 case TailCallVarargsInlinedCaller:
1358 case TailCallForwardVarargs:
1359 case TailCallForwardVarargsInlinedCaller:
1360 case ConstructVarargs:
1361 case ConstructForwardVarargs:
1362 compileCallOrConstructVarargs();
1363 break;
1364 case CallEval:
1365 compileCallEval();
1366 break;
1367 case VarargsLength:
1368 compileVarargsLength();
1369 break;
1370 case LoadVarargs:
1371 compileLoadVarargs();
1372 break;
1373 case ForwardVarargs:
1374 compileForwardVarargs();
1375 break;
1376 case DFG::Jump:
1377 compileJump();
1378 break;
1379 case DFG::Branch:
1380 compileBranch();
1381 break;
1382 case DFG::Switch:
1383 compileSwitch();
1384 break;
1385 case DFG::EntrySwitch:
1386 compileEntrySwitch();
1387 break;
1388 case DFG::Return:
1389 compileReturn();
1390 break;
1391 case ForceOSRExit:
1392 compileForceOSRExit();
1393 break;
1394 case CPUIntrinsic:
1395#if CPU(X86_64)
1396 compileCPUIntrinsic();
1397#else
1398 RELEASE_ASSERT_NOT_REACHED();
1399#endif
1400 break;
1401 case Throw:
1402 compileThrow();
1403 break;
1404 case ThrowStaticError:
1405 compileThrowStaticError();
1406 break;
1407 case InvalidationPoint:
1408 compileInvalidationPoint();
1409 break;
1410 case IsEmpty:
1411 compileIsEmpty();
1412 break;
1413 case TypeOfIsUndefined:
1414 compileTypeOfIsUndefined();
1415 break;
1416 case TypeOfIsObject:
1417 compileTypeOfIsObject();
1418 break;
1419 case TypeOfIsFunction:
1420 compileIsCallable(operationTypeOfIsFunction);
1421 break;
1422 case IsUndefinedOrNull:
1423 compileIsUndefinedOrNull();
1424 break;
1425 case IsBoolean:
1426 compileIsBoolean();
1427 break;
1428 case IsNumber:
1429 compileIsNumber();
1430 break;
1431 case IsBigInt:
1432 compileIsBigInt();
1433 break;
1434 case NumberIsInteger:
1435 compileNumberIsInteger();
1436 break;
1437 case IsCellWithType:
1438 compileIsCellWithType();
1439 break;
1440 case MapHash:
1441 compileMapHash();
1442 break;
1443 case NormalizeMapKey:
1444 compileNormalizeMapKey();
1445 break;
1446 case GetMapBucket:
1447 compileGetMapBucket();
1448 break;
1449 case GetMapBucketHead:
1450 compileGetMapBucketHead();
1451 break;
1452 case GetMapBucketNext:
1453 compileGetMapBucketNext();
1454 break;
1455 case LoadKeyFromMapBucket:
1456 compileLoadKeyFromMapBucket();
1457 break;
1458 case LoadValueFromMapBucket:
1459 compileLoadValueFromMapBucket();
1460 break;
1461 case ExtractValueFromWeakMapGet:
1462 compileExtractValueFromWeakMapGet();
1463 break;
1464 case SetAdd:
1465 compileSetAdd();
1466 break;
1467 case MapSet:
1468 compileMapSet();
1469 break;
1470 case WeakMapGet:
1471 compileWeakMapGet();
1472 break;
1473 case WeakSetAdd:
1474 compileWeakSetAdd();
1475 break;
1476 case WeakMapSet:
1477 compileWeakMapSet();
1478 break;
1479 case IsObject:
1480 compileIsObject();
1481 break;
1482 case IsCallable:
1483 compileIsCallable(operationObjectIsCallable);
1484 break;
1485 case IsConstructor:
1486 compileIsConstructor();
1487 break;
1488 case IsTypedArrayView:
1489 compileIsTypedArrayView();
1490 break;
1491 case ParseInt:
1492 compileParseInt();
1493 break;
1494 case TypeOf:
1495 compileTypeOf();
1496 break;
1497 case CheckTypeInfoFlags:
1498 compileCheckTypeInfoFlags();
1499 break;
1500 case OverridesHasInstance:
1501 compileOverridesHasInstance();
1502 break;
1503 case InstanceOf:
1504 compileInstanceOf();
1505 break;
1506 case InstanceOfCustom:
1507 compileInstanceOfCustom();
1508 break;
1509 case CountExecution:
1510 compileCountExecution();
1511 break;
1512 case SuperSamplerBegin:
1513 compileSuperSamplerBegin();
1514 break;
1515 case SuperSamplerEnd:
1516 compileSuperSamplerEnd();
1517 break;
1518 case StoreBarrier:
1519 case FencedStoreBarrier:
1520 compileStoreBarrier();
1521 break;
1522 case HasIndexedProperty:
1523 compileHasIndexedProperty(operationHasIndexedProperty);
1524 break;
1525 case HasEnumerableIndexedProperty:
1526 compileHasIndexedProperty(operationHasEnumerableIndexedProperty);
1527 break;
1528 case HasEnumerableStructureProperty:
1529 compileHasEnumerableStructureProperty();
1530 break;
1531 case HasEnumerableProperty:
1532 compileHasEnumerableProperty();
1533 break;
1534 case HasOwnStructureProperty:
1535 compileHasOwnStructureProperty();
1536 break;
1537 case InStructureProperty:
1538 compileInStructureProperty();
1539 break;
1540 case GetDirectPname:
1541 compileGetDirectPname();
1542 break;
1543 case GetEnumerableLength:
1544 compileGetEnumerableLength();
1545 break;
1546 case GetPropertyEnumerator:
1547 compileGetPropertyEnumerator();
1548 break;
1549 case GetEnumeratorStructurePname:
1550 compileGetEnumeratorStructurePname();
1551 break;
1552 case GetEnumeratorGenericPname:
1553 compileGetEnumeratorGenericPname();
1554 break;
1555 case ToIndexString:
1556 compileToIndexString();
1557 break;
1558 case CheckStructureImmediate:
1559 compileCheckStructureImmediate();
1560 break;
1561 case MaterializeNewObject:
1562 compileMaterializeNewObject();
1563 break;
1564 case MaterializeCreateActivation:
1565 compileMaterializeCreateActivation();
1566 break;
1567 case MaterializeNewInternalFieldObject:
1568 compileMaterializeNewInternalFieldObject();
1569 break;
1570 case CheckTraps:
1571 compileCheckTraps();
1572 break;
1573 case CreateRest:
1574 compileCreateRest();
1575 break;
1576 case GetRestLength:
1577 compileGetRestLength();
1578 break;
1579 case RegExpExec:
1580 compileRegExpExec();
1581 break;
1582 case RegExpExecNonGlobalOrSticky:
1583 compileRegExpExecNonGlobalOrSticky();
1584 break;
1585 case RegExpTest:
1586 compileRegExpTest();
1587 break;
1588 case RegExpMatchFast:
1589 compileRegExpMatchFast();
1590 break;
1591 case RegExpMatchFastGlobal:
1592 compileRegExpMatchFastGlobal();
1593 break;
1594 case NewRegexp:
1595 compileNewRegexp();
1596 break;
1597 case SetFunctionName:
1598 compileSetFunctionName();
1599 break;
1600 case StringReplace:
1601 case StringReplaceRegExp:
1602 compileStringReplace();
1603 break;
1604 case GetRegExpObjectLastIndex:
1605 compileGetRegExpObjectLastIndex();
1606 break;
1607 case SetRegExpObjectLastIndex:
1608 compileSetRegExpObjectLastIndex();
1609 break;
1610 case LogShadowChickenPrologue:
1611 compileLogShadowChickenPrologue();
1612 break;
1613 case LogShadowChickenTail:
1614 compileLogShadowChickenTail();
1615 break;
1616 case RecordRegExpCachedResult:
1617 compileRecordRegExpCachedResult();
1618 break;
1619 case ResolveScopeForHoistingFuncDeclInEval:
1620 compileResolveScopeForHoistingFuncDeclInEval();
1621 break;
1622 case ResolveScope:
1623 compileResolveScope();
1624 break;
1625 case GetDynamicVar:
1626 compileGetDynamicVar();
1627 break;
1628 case PutDynamicVar:
1629 compilePutDynamicVar();
1630 break;
1631 case Unreachable:
1632 compileUnreachable();
1633 break;
1634 case StringSlice:
1635 compileStringSlice();
1636 break;
1637 case ToLowerCase:
1638 compileToLowerCase();
1639 break;
1640 case NumberToStringWithRadix:
1641 compileNumberToStringWithRadix();
1642 break;
1643 case NumberToStringWithValidRadixConstant:
1644 compileNumberToStringWithValidRadixConstant();
1645 break;
1646 case CheckJSCast:
1647 case CheckNotJSCast:
1648 compileCheckJSCast();
1649 break;
1650 case CallDOM:
1651 compileCallDOM();
1652 break;
1653 case CallDOMGetter:
1654 compileCallDOMGetter();
1655 break;
1656 case FilterCallLinkStatus:
1657 case FilterGetByStatus:
1658 case FilterPutByIdStatus:
1659 case FilterInByStatus:
1660 case FilterDeleteByStatus:
1661 case FilterCheckPrivateBrandStatus:
1662 case FilterSetPrivateBrandStatus:
1663 compileFilterICStatus();
1664 break;
1665 case DateGetInt32OrNaN:
1666 case DateGetTime:
1667 compileDateGet();
1668 break;
1669 case DataViewGetInt:
1670 case DataViewGetFloat:
1671 compileDataViewGet();
1672 break;
1673 case DataViewSet:
1674 compileDataViewSet();
1675 break;
1676
1677 case LoopHint: {
1678 compileLoopHint();
1679 break;
1680 }
1681
1682 case PhantomLocal:
1683 case MovHint:
1684 case ExitOK:
1685 case PhantomNewObject:
1686 case PhantomNewFunction:
1687 case PhantomNewGeneratorFunction:
1688 case PhantomNewAsyncGeneratorFunction:
1689 case PhantomNewAsyncFunction:
1690 case PhantomNewInternalFieldObject:
1691 case PhantomCreateActivation:
1692 case PhantomDirectArguments:
1693 case PhantomCreateRest:
1694 case PhantomSpread:
1695 case PhantomNewArrayWithSpread:
1696 case PhantomNewArrayBuffer:
1697 case PhantomClonedArguments:
1698 case PhantomNewRegexp:
1699 case PutHint:
1700 case BottomValue:
1701 case KillStack:
1702 case InitializeEntrypointArguments:
1703 break;
1704 default:
1705 DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend");
1706 break;
1707 }
1708
1709 if (Options::validateDFGClobberize() && !m_node->isTerminal()) {
1710 bool clobberedWorld = false;
1711 auto validateClobberize = [&] () {
1712 clobberedWorld = true;
1713 };
1714
1715 clobberize(m_graph, m_node, [] (auto...) { }, [] (auto...) { }, [] (auto...) { }, validateClobberize);
1716 if (!clobberedWorld) {
1717 LValue didNotEnterVM = m_out.notZero32(m_out.load8ZeroExt32(m_out.absolute(&vm().didEnterVM)));
1718 auto* check = m_out.speculate(didNotEnterVM);
1719 check->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams&) {
1720 jit.breakpoint();
1721 });
1722 } else
1723 m_out.store(m_out.int32Zero, m_out.absolute(&vm().didEnterVM));
1724 }
1725
1726 if (m_node->isTerminal())
1727 return false;
1728
1729 if (!m_state.isValid()) {
1730 safelyInvalidateAfterTermination();
1731 return false;
1732 }
1733
1734 m_availabilityCalculator.executeNode(m_node);
1735 m_interpreter.executeEffects(nodeIndex);
1736
1737 return true;
1738 }
1739
1740 void compileUpsilon()
1741 {
1742 LValue upsilonValue = nullptr;
1743 switch (m_node->child1().useKind()) {
1744 case DoubleRepUse:
1745 upsilonValue = lowDouble(m_node->child1());
1746 break;
1747 case Int32Use:
1748 case KnownInt32Use:
1749 upsilonValue = lowInt32(m_node->child1());
1750 break;
1751 case Int52RepUse:
1752 upsilonValue = lowInt52(m_node->child1());
1753 break;
1754 case BooleanUse:
1755 case KnownBooleanUse:
1756 upsilonValue = lowBoolean(m_node->child1());
1757 break;
1758 case CellUse:
1759 case KnownCellUse:
1760 upsilonValue = lowCell(m_node->child1());
1761 break;
1762 case UntypedUse:
1763 upsilonValue = lowJSValue(m_node->child1());
1764 break;
1765 default:
1766 DFG_CRASH(m_graph, m_node, "Bad use kind");
1767 break;
1768 }
1769 ValueFromBlock upsilon = m_out.anchor(upsilonValue);
1770 LValue phiNode = m_phis.get(m_node->phi());
1771 m_out.addIncomingToPhi(phiNode, upsilon);
1772 }
1773
1774 void compilePhi()
1775 {
1776 LValue phi = m_phis.get(m_node);
1777 m_out.m_block->append(phi);
1778
1779 switch (m_node->flags() & NodeResultMask) {
1780 case NodeResultDouble:
1781 setDouble(phi);
1782 break;
1783 case NodeResultInt32:
1784 setInt32(phi);
1785 break;
1786 case NodeResultInt52:
1787 setInt52(phi);
1788 break;
1789 case NodeResultBoolean:
1790 setBoolean(phi);
1791 break;
1792 case NodeResultJS:
1793 setJSValue(phi);
1794 break;
1795 default:
1796 DFG_CRASH(m_graph, m_node, "Bad result type");
1797 break;
1798 }
1799 }
1800
1801 void compileDoubleConstant()
1802 {
1803 setDouble(m_out.constDouble(m_node->asNumber()));
1804 }
1805
    // Lowers an Int52 constant. The value is made available in both Int52
    // representations: the shifted form (value << int52ShiftAmount) used by
    // Int52 arithmetic, and the strict (unshifted) 64-bit form.
    void compileInt52Constant()
    {
        int64_t value = m_node->asAnyInt();

        setInt52(m_out.constInt64(value << JSValue::int52ShiftAmount));
        setStrictInt52(m_out.constInt64(value));
    }
1813
    // Lowers a LazyJSValue constant. The value is not materializable now, so
    // emit a patchpoint whose generator asks the lazy value to emit itself
    // into the result register at code-generation time.
    void compileLazyJSConstant()
    {
        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        LazyJSValue value = m_node->lazyJSValue();
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                value.emit(jit, JSValueRegs(params[0].gpr()));
            });
        // Pure materialization with no reads or writes, so B3 is free to
        // hoist, sink, or deduplicate it.
        patchpoint->effects = Effects::none();
        setJSValue(patchpoint);
    }
1825
    // Lowers DoubleRep: produces the unboxed double representation of child1.
    // The conversion strategy depends on the child's use kind.
    void compileDoubleRep()
    {
        switch (m_node->child1().useKind()) {
        case RealNumberUse: {
            // Speculating "real number" (int32, or a double that is not NaN).
            // Optimistically unbox as a double; if that yields NaN, the value
            // was either a boxed int32 (decode it on the slow path) or not a
            // real number at all (the type check below fails).
            LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);

            LValue doubleValue = unboxDouble(value);

            LBasicBlock intCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            ValueFromBlock fastResult = m_out.anchor(doubleValue);
            // x == x is false only when x is NaN.
            m_out.branch(
                m_out.doubleEqual(doubleValue, doubleValue),
                usually(continuation), rarely(intCase));

            LBasicBlock lastNext = m_out.appendTo(intCase, continuation);

            // If it is not an int32 either, the speculation fails and we exit.
            FTL_TYPE_CHECK(
                jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
                isNotInt32(value, provenType(m_node->child1()) & ~SpecDoubleReal));
            ValueFromBlock slowResult = m_out.anchor(m_out.intToDouble(unboxInt32(value)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);

            setDouble(m_out.phi(Double, fastResult, slowResult));
            return;
        }

        case NotCellNorBigIntUse:
        case NumberUse: {
            // NumberUse accepts only int32s and doubles. NotCellNorBigIntUse
            // additionally converts undefined, null, and booleans inline.
            bool shouldConvertNonNumber = m_node->child1().useKind() == NotCellNorBigIntUse;

            LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);

            LBasicBlock intCase = m_out.newBlock();
            LBasicBlock doubleTesting = m_out.newBlock();
            LBasicBlock doubleCase = m_out.newBlock();
            LBasicBlock nonDoubleCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isNotInt32(value, provenType(m_node->child1())),
                unsure(doubleTesting), unsure(intCase));

            LBasicBlock lastNext = m_out.appendTo(intCase, doubleTesting);

            ValueFromBlock intToDouble = m_out.anchor(
                m_out.intToDouble(unboxInt32(value)));
            m_out.jump(continuation);

            m_out.appendTo(doubleTesting, doubleCase);
            LValue valueIsNumber = isNumber(value, provenType(m_node->child1()));
            m_out.branch(valueIsNumber, usually(doubleCase), rarely(nonDoubleCase));

            m_out.appendTo(doubleCase, nonDoubleCase);
            ValueFromBlock unboxedDouble = m_out.anchor(unboxDouble(value));
            m_out.jump(continuation);

            if (shouldConvertNonNumber) {
                // Chain of equality tests against the non-number immediates,
                // each producing the double required by ToNumber:
                // undefined -> NaN, null -> 0, true -> 1, false -> 0.
                // Anything else (cells, BigInts) fails the type check.
                LBasicBlock undefinedCase = m_out.newBlock();
                LBasicBlock testNullCase = m_out.newBlock();
                LBasicBlock nullCase = m_out.newBlock();
                LBasicBlock testBooleanTrueCase = m_out.newBlock();
                LBasicBlock convertBooleanTrueCase = m_out.newBlock();
                LBasicBlock convertBooleanFalseCase = m_out.newBlock();

                m_out.appendTo(nonDoubleCase, undefinedCase);
                LValue valueIsUndefined = m_out.equal(value, m_out.constInt64(JSValue::ValueUndefined));
                m_out.branch(valueIsUndefined, unsure(undefinedCase), unsure(testNullCase));

                m_out.appendTo(undefinedCase, testNullCase);
                ValueFromBlock convertedUndefined = m_out.anchor(m_out.constDouble(PNaN));
                m_out.jump(continuation);

                m_out.appendTo(testNullCase, nullCase);
                LValue valueIsNull = m_out.equal(value, m_out.constInt64(JSValue::ValueNull));
                m_out.branch(valueIsNull, unsure(nullCase), unsure(testBooleanTrueCase));

                m_out.appendTo(nullCase, testBooleanTrueCase);
                ValueFromBlock convertedNull = m_out.anchor(m_out.constDouble(0));
                m_out.jump(continuation);

                m_out.appendTo(testBooleanTrueCase, convertBooleanTrueCase);
                LValue valueIsBooleanTrue = m_out.equal(value, m_out.constInt64(JSValue::ValueTrue));
                m_out.branch(valueIsBooleanTrue, unsure(convertBooleanTrueCase), unsure(convertBooleanFalseCase));

                m_out.appendTo(convertBooleanTrueCase, convertBooleanFalseCase);
                ValueFromBlock convertedTrue = m_out.anchor(m_out.constDouble(1));
                m_out.jump(continuation);

                m_out.appendTo(convertBooleanFalseCase, continuation);

                // Last possibility: the value must be the boolean false, or
                // the speculation fails.
                LValue valueIsNotBooleanFalse = m_out.notEqual(value, m_out.constInt64(JSValue::ValueFalse));
                FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), ~SpecCellCheck & ~SpecBigInt, valueIsNotBooleanFalse);
                ValueFromBlock convertedFalse = m_out.anchor(m_out.constDouble(0));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setDouble(m_out.phi(Double, intToDouble, unboxedDouble, convertedUndefined, convertedNull, convertedTrue, convertedFalse));
                return;
            }
            // NumberUse: a non-number here is a failed speculation.
            m_out.appendTo(nonDoubleCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecBytecodeNumber, m_out.booleanTrue);
            m_out.unreachable();

            m_out.appendTo(continuation, lastNext);

            setDouble(m_out.phi(Double, intToDouble, unboxedDouble));
            return;
        }

        case Int52RepUse: {
            setDouble(strictInt52ToDouble(lowStrictInt52(m_node->child1())));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
        }
    }
1948
1949 void compileDoubleAsInt32()
1950 {
1951 LValue integerValue = convertDoubleToInt32(lowDouble(m_node->child1()), shouldCheckNegativeZero(m_node->arithMode()));
1952 setInt32(integerValue);
1953 }
1954
    // Lowers ValueRep: boxes the child back into a JSValue representation.
    void compileValueRep()
    {
        switch (m_node->child1().useKind()) {
        case DoubleRepUse: {
            LValue value = lowDouble(m_node->child1());

            // If the double could be an "impure" NaN (a NaN bit pattern that
            // would collide with the boxed-value encoding), normalize it to
            // the canonical pure NaN before boxing. x == x selects the value
            // itself for all non-NaN inputs.
            if (m_interpreter.needsTypeCheck(m_node->child1(), ~SpecDoubleImpureNaN)) {
                value = m_out.select(
                    m_out.doubleEqual(value, value), value, m_out.constDouble(PNaN));
            }

            setJSValue(boxDouble(value));
            return;
        }

        case Int52RepUse: {
            setJSValue(strictInt52ToJSValue(lowStrictInt52(m_node->child1())));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
        }
    }
1979
    // Lowers Int52Rep: converts the child to the strict (unshifted) Int52
    // representation.
    void compileInt52Rep()
    {
        switch (m_node->child1().useKind()) {
        case Int32Use:
            // Any int32 fits in an int52; sign-extension suffices.
            setStrictInt52(m_out.signExt32To64(lowInt32(m_node->child1())));
            return;

        case AnyIntUse:
            // Speculate that the JSValue holds an integer representable in 52 bits.
            setStrictInt52(
                jsValueToStrictInt52(
                    m_node->child1(), lowJSValue(m_node->child1(), ManualOperandSpeculation)));
            return;

        case DoubleRepAnyIntUse:
            // Speculate that the double is an exact integer in int52 range.
            setStrictInt52(
                doubleToStrictInt52(
                    m_node->child1(), lowDouble(m_node->child1())));
            return;

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
2003
    // Lowers ValueToInt32: converts the child to an int32, choosing the
    // cheapest conversion based on how the child has already been lowered.
    void compileValueToInt32()
    {
        switch (m_node->child1().useKind()) {
        case Int52RepUse:
            setInt32(m_out.castToInt32(lowStrictInt52(m_node->child1())));
            break;

        case DoubleRepUse:
            setInt32(doubleToInt32(lowDouble(m_node->child1())));
            break;

        case NumberUse:
        case NotCellNorBigIntUse: {
            // Prefer an already-available int32 lowering of the child.
            LoweredNodeValue value = m_int32Values.get(m_node->child1().node());
            if (isValid(value)) {
                setInt32(value.value());
                break;
            }

            // Next best: reuse an already-available JSValue lowering.
            value = m_jsValueValues.get(m_node->child1().node());
            if (isValid(value)) {
                setInt32(numberOrNotCellNorBigIntToInt32(m_node->child1(), value.value()));
                break;
            }

            // We'll basically just get here for constants. But it's good to have this
            // catch-all since we often add new representations into the mix.
            setInt32(
                numberOrNotCellNorBigIntToInt32(
                    m_node->child1(),
                    lowJSValue(m_node->child1(), ManualOperandSpeculation)));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2043
    // Lowers BooleanToNumber: false -> 0, true -> 1. Note that the result
    // representation differs by path: BooleanUse and the proven bool-like
    // untyped path produce an int32, while the generic untyped path produces
    // a JSValue (non-booleans pass through unchanged).
    void compileBooleanToNumber()
    {
        switch (m_node->child1().useKind()) {
        case BooleanUse: {
            // A lowered boolean is already 0 or 1; just widen to int32.
            setInt32(m_out.zeroExt(lowBoolean(m_node->child1()), Int32));
            return;
        }

        case UntypedUse: {
            LValue value = lowJSValue(m_node->child1());

            if (!m_interpreter.needsTypeCheck(m_node->child1(), SpecBoolInt32 | SpecBoolean)) {
                // Proven to be a boolean or a bool-valued int32. In the
                // JSValue encoding both carry the numeric 0/1 in the low bit,
                // so masking the low 32 bits down to that bit suffices.
                setInt32(m_out.bitAnd(m_out.castToInt32(value), m_out.int32One));
                return;
            }

            LBasicBlock booleanCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Non-booleans flow through unchanged as a JSValue.
            ValueFromBlock notBooleanResult = m_out.anchor(value);
            m_out.branch(
                isBoolean(value, provenType(m_node->child1())),
                unsure(booleanCase), unsure(continuation));

            LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation);
            // unboxBoolean yields 0/1; OR-ing in the number tag boxes it as
            // an int32 JSValue.
            ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr(
                m_out.zeroExt(unboxBoolean(value), Int64), m_numberTag));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, booleanResult, notBooleanResult));
            return;
        }

        default:
            RELEASE_ASSERT_NOT_REACHED();
            return;
        }
    }
2083
    // Loads an OSR-entry local out of the side buffer that the OSR entry
    // machinery populated before jumping into this FTL code.
    void compileExtractOSREntryLocal()
    {
        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(
            m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer());
        setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedOperand().virtualRegister().toLocal())));
    }
2090
    // Loads a value for a catch handler out of the catch OSR entry scratch
    // buffer, indexed by the node's catch entry index.
    void compileExtractCatchLocal()
    {
        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_ftlState.jitCode->common.catchOSREntryBuffer->dataBuffer());
        setJSValue(m_out.load64(m_out.absolute(buffer + m_node->catchOSREntryIndex())));
    }
2096
2097 void compileClearCatchLocals()
2098 {
2099 ScratchBuffer* scratchBuffer = m_ftlState.jitCode->common.catchOSREntryBuffer;
2100 ASSERT(scratchBuffer);
2101 m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));
2102 }
2103
    // Lowers GetStack: loads a flushed value from its stack slot. The flush
    // format selects the load width/representation; for formats other than
    // double and int52, the abstract value decides between a 32-bit payload
    // load (proven int32) and a full 64-bit JSValue load.
    void compileGetStack()
    {
        StackAccessData* data = m_node->stackAccessData();
        AbstractValue& value = m_state.operand(data->operand);

        DFG_ASSERT(m_graph, m_node, isConcrete(data->format), data->format);

        switch (data->format) {
        case FlushedDouble:
            setDouble(m_out.loadDouble(addressFor(data->machineLocal)));
            break;
        case FlushedInt52:
            setInt52(m_out.load64(addressFor(data->machineLocal)));
            break;
        default:
            if (isInt32Speculation(value.m_type))
                setInt32(m_out.load32(payloadFor(data->machineLocal)));
            else
                setJSValue(m_out.load64(addressFor(data->machineLocal)));
            break;
        }
    }
2126
2127 void compilePutStack()
2128 {
2129 StackAccessData* data = m_node->stackAccessData();
2130 switch (data->format) {
2131 case FlushedJSValue: {
2132 LValue value = lowJSValue(m_node->child1());
2133 m_out.store64(value, addressFor(data->machineLocal));
2134 break;
2135 }
2136
2137 case FlushedDouble: {
2138 LValue value = lowDouble(m_node->child1());
2139 m_out.storeDouble(value, addressFor(data->machineLocal));
2140 break;
2141 }
2142
2143 case FlushedInt32: {
2144 LValue value = lowInt32(m_node->child1());
2145 m_out.store32(value, payloadFor(data->machineLocal));
2146 break;
2147 }
2148
2149 case FlushedInt52: {
2150 LValue value = lowInt52(m_node->child1());
2151 m_out.store64(value, addressFor(data->machineLocal));
2152 break;
2153 }
2154
2155 case FlushedCell: {
2156 LValue value = lowCell(m_node->child1());
2157 m_out.store64(value, addressFor(data->machineLocal));
2158 break;
2159 }
2160
2161 case FlushedBoolean: {
2162 speculateBoolean(m_node->child1());
2163 m_out.store64(
2164 lowJSValue(m_node->child1(), ManualOperandSpeculation),
2165 addressFor(data->machineLocal));
2166 break;
2167 }
2168
2169 default:
2170 DFG_CRASH(m_graph, m_node, "Bad flush format");
2171 break;
2172 }
2173 }
2174
    // The node itself has no runtime effect, but its edges may still carry
    // type speculations that must be honored.
    void compileNoOp()
    {
        DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate);
    }
2179
    // Lowers ToObject and CallObjectConstructor: objects pass through
    // unchanged on the fast path; all other values go to a runtime call.
    void compileToObjectOrCallObjectConstructor()
    {
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
        ValueFromBlock fastResult = m_out.anchor(value);
        m_out.branch(isObject(value), usually(continuation), rarely(slowCase));

        m_out.appendTo(slowCase, continuation);

        ValueFromBlock slowResult;
        if (m_node->op() == ToObject) {
            // ToObject may carry an identifier that the runtime uses to build
            // a better error message if the conversion throws.
            UniquedStringImpl* errorMessage = nullptr;
            if (m_node->identifierNumber() != UINT32_MAX)
                errorMessage = m_graph.identifiers()[m_node->identifierNumber()];
            auto* globalObject = m_graph.globalObjectFor(m_origin.semantic);
            slowResult = m_out.anchor(vmCall(Int64, operationToObject, weakPointer(globalObject), value, m_out.constIntPtr(errorMessage)));
        } else
            slowResult = m_out.anchor(vmCall(Int64, operationCallObjectConstructor, frozenPointer(m_node->cellOperand()), value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
    }
2210
2211 void compileToThis()
2212 {
2213 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
2214
2215 LValue value = lowJSValue(m_node->child1());
2216
2217 LBasicBlock isCellCase = m_out.newBlock();
2218 LBasicBlock slowCase = m_out.newBlock();
2219 LBasicBlock continuation = m_out.newBlock();
2220
2221 m_out.branch(
2222 isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));
2223
2224 LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
2225 ValueFromBlock fastResult = m_out.anchor(value);
2226 m_out.branch(
2227 m_out.testIsZero32(
2228 m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
2229 m_out.constInt32(OverridesToThis)),
2230 usually(continuation), rarely(slowCase));
2231
2232 m_out.appendTo(slowCase, continuation);
2233 J_JITOperation_GJ function;
2234 if (m_node->ecmaMode().isStrict())
2235 function = operationToThisStrict;
2236 else
2237 function = operationToThis;
2238 ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, function, weakPointer(globalObject), value));
2239 m_out.jump(continuation);
2240
2241 m_out.appendTo(continuation, lastNext);
2242 setJSValue(m_out.phi(Int64, fastResult, slowResult));
2243 }
2244
    void compileValueAdd()
    {
        // ValueAdd: BigInt32 pairs get an inline checked add, HeapBigInt pairs
        // call the runtime, and everything else goes through a binary math IC.
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

#if USE(BIGINT32)
        // FIXME: Introduce another BigInt32 code generation: binary use kinds are BigIntUse32, but result is SpecAnyInt and accepting overflow.
        // Let's distinguish these modes based on result type information by introducing NodeResultBigInt32.
        // https://bugs.webkit.org/show_bug.cgi?id=210957
        // https://bugs.webkit.org/show_bug.cgi?id=211040
        if (m_node->isBinaryUseKind(BigInt32Use)) {
            LValue left = lowBigInt32(m_node->child1());
            LValue right = lowBigInt32(m_node->child2());

            LValue unboxedLeft = unboxBigInt32(left);
            LValue unboxedRight = unboxBigInt32(right);

            // Add the unboxed payloads; overflow exits via BigInt32Overflow.
            CheckValue* result = m_out.speculateAdd(unboxedLeft, unboxedRight);
            blessSpeculation(result, BigInt32Overflow, noValue(), nullptr, m_origin);

            LValue boxedResult = boxBigInt32(result);
            setJSValue(boxedResult);
            return;
        }
#endif

        if (m_node->isBinaryUseKind(HeapBigIntUse)) {
            LValue left = lowHeapBigInt(m_node->child1());
            LValue right = lowHeapBigInt(m_node->child2());

            // Heap BigInt addition always goes to the runtime.
            LValue result = vmCall(pointerType(), operationAddHeapBigInt, weakPointer(globalObject), left, right);
            setJSValue(result);
            return;
        }

        // Generic path: a patchable binary math IC keyed on the baseline
        // code block's arith profile for this bytecode.
        CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_origin.semantic);
        BytecodeIndex bytecodeIndex = m_origin.semantic.bytecodeIndex();
        BinaryArithProfile* arithProfile = baselineCodeBlock->binaryArithProfileForBytecodeIndex(bytecodeIndex);
        auto repatchingFunction = operationValueAddOptimize;
        auto nonRepatchingFunction = operationValueAdd;
        compileBinaryMathIC<JITAddGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
    }
2286
    void compileValueSub()
    {
        // ValueSub mirrors compileValueAdd: inline checked sub for BigInt32,
        // runtime call for HeapBigInt, binary math IC otherwise.
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

#if USE(BIGINT32)
        // FIXME: Introduce another BigInt32 code generation: binary use kinds are BigIntUse32, but result is SpecAnyInt and accepting overflow.
        // Let's distinguish these modes based on result type information by introducing NodeResultBigInt32.
        // https://bugs.webkit.org/show_bug.cgi?id=210957
        // https://bugs.webkit.org/show_bug.cgi?id=211040
        if (m_node->isBinaryUseKind(BigInt32Use)) {
            LValue left = lowBigInt32(m_node->child1());
            LValue right = lowBigInt32(m_node->child2());

            LValue unboxedLeft = unboxBigInt32(left);
            LValue unboxedRight = unboxBigInt32(right);

            // Subtract the unboxed payloads; overflow exits via BigInt32Overflow.
            CheckValue* result = m_out.speculateSub(unboxedLeft, unboxedRight);
            blessSpeculation(result, BigInt32Overflow, noValue(), nullptr, m_origin);

            LValue boxedResult = boxBigInt32(result);
            setJSValue(boxedResult);
            return;
        }
#endif

        if (m_node->isBinaryUseKind(HeapBigIntUse)) {
            LValue left = lowHeapBigInt(m_node->child1());
            LValue right = lowHeapBigInt(m_node->child2());

            // Heap BigInt subtraction always goes to the runtime.
            LValue result = vmCall(pointerType(), operationSubHeapBigInt, weakPointer(globalObject), left, right);
            setJSValue(result);
            return;
        }

        // Generic path: a patchable binary math IC keyed on the baseline
        // code block's arith profile for this bytecode.
        CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_origin.semantic);
        BytecodeIndex bytecodeIndex = m_origin.semantic.bytecodeIndex();
        BinaryArithProfile* arithProfile = baselineCodeBlock->binaryArithProfileForBytecodeIndex(bytecodeIndex);
        auto repatchingFunction = operationValueSubOptimize;
        auto nonRepatchingFunction = operationValueSub;
        compileBinaryMathIC<JITSubGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
    }
2328
2329 void compileValueMul()
2330 {
2331 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
2332
2333#if USE(BIGINT32)
2334 // FIXME: Introduce another BigInt32 code generation: binary use kinds are BigIntUse32, but result is SpecAnyInt and accepting overflow.
2335 // Let's distinguish these modes based on result type information by introducing NodeResultBigInt32.
2336 // https://bugs.webkit.org/show_bug.cgi?id=210957
2337 // https://bugs.webkit.org/show_bug.cgi?id=211040
2338 if (m_node->isBinaryUseKind(BigInt32Use)) {
2339 LValue left = lowBigInt32(m_node->child1());
2340 LValue right = lowBigInt32(m_node->child2());
2341
2342 LValue unboxedLeft = unboxBigInt32(left);
2343 LValue unboxedRight = unboxBigInt32(right);
2344
2345 CheckValue* result = m_out.speculateMul(unboxedLeft, unboxedRight);
2346 blessSpeculation(result, BigInt32Overflow, noValue(), nullptr, m_origin);
2347
2348 LValue boxedResult = boxBigInt32(result);
2349 setJSValue(boxedResult);
2350 return;
2351 }
2352#endif
2353
2354 if (m_node->isBinaryUseKind(HeapBigIntUse)) {
2355 LValue left = lowHeapBigInt(m_node->child1());
2356 LValue right = lowHeapBigInt(m_node->child2());
2357
2358 LValue result = vmCall(Int64, operationMulHeapBigInt, weakPointer(globalObject), left, right);
2359 setJSValue(result);
2360 return;
2361 }
2362
2363 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_origin.semantic);
2364 BytecodeIndex bytecodeIndex = m_origin.semantic.bytecodeIndex();
2365 BinaryArithProfile* arithProfile = baselineCodeBlock->binaryArithProfileForBytecodeIndex(bytecodeIndex);
2366 auto repatchingFunction = operationValueMulOptimize;
2367 auto nonRepatchingFunction = operationValueMul;
2368 compileBinaryMathIC<JITMulGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
2369 }
2370
    template <typename Generator, typename Func1, typename Func2,
        typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>>
    void compileUnaryMathIC(UnaryArithProfile* arithProfile, Func1 repatchingFunction, Func2 nonRepatchingFunction)
    {
        // Emits a patchpoint housing an inline math IC for a unary op. The
        // Generator tries to emit a fast inline sequence; its slow-path jumps
        // land on a late path that calls either the repatching or the
        // non-repatching runtime function.
        Node* node = m_node;

        LValue operand = lowJSValue(node->child1());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(operand);
        // Keep the JSValue tag registers live for the IC's generated code.
        patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
        patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        // Captured by value below: the generator runs after this frame is gone.
        State* state = &m_ftlState;
        CodeOrigin semanticNodeOrigin = node->origin.semantic;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

#if ENABLE(MATH_IC_STATS)
                auto inlineStart = jit.label();
#endif

                Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
                JITUnaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile);
                // params[0] is the result register, params[1] the operand.
                mathIC->m_generator = Generator(JSValueRegs(params[0].gpr()), JSValueRegs(params[1].gpr()), params.gpScratch(0));

                bool shouldEmitProfiling = false;
                bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);

                if (generatedInline) {
                    ASSERT(!mathICGenerationState->slowPathJumps.empty());
                    auto done = jit.label();
                    // Emit the IC's slow path out-of-line, after the main body.
                    params.addLatePath([=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);
                        mathICGenerationState->slowPathJumps.link(&jit);
                        mathICGenerationState->slowPathStart = jit.label();
#if ENABLE(MATH_IC_STATS)
                        auto slowPathStart = jit.label();
#endif

                        if (mathICGenerationState->shouldSlowPathRepatch) {
                            // The repatching call also receives the IC pointer so
                            // the runtime can patch the inline code.
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, semanticNodeOrigin, exceptions.get(),
                                repatchingFunction, params[0].gpr(), jit.codeBlock()->globalObjectFor(semanticNodeOrigin), params[1].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
                            mathICGenerationState->slowPathCall = call.call();
                        } else {
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, semanticNodeOrigin,
                                exceptions.get(), nonRepatchingFunction, params[0].gpr(), jit.codeBlock()->globalObjectFor(semanticNodeOrigin), params[1].gpr());
                            mathICGenerationState->slowPathCall = call.call();
                        }
                        jit.jump().linkTo(done, &jit);

                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
                        });

#if ENABLE(MATH_IC_STATS)
                        auto slowPathEnd = jit.label();
                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
                            mathIC->m_generatedCodeSize += size;
                        });
#endif
                    });
                } else {
                    // No inline IC was generated; always call the
                    // non-repatching runtime function.
                    callOperation(
                        *state, params.unavailableRegisters(), jit, semanticNodeOrigin, exceptions.get(),
                        nonRepatchingFunction, params[0].gpr(), jit.codeBlock()->globalObjectFor(semanticNodeOrigin), params[1].gpr());
                }

#if ENABLE(MATH_IC_STATS)
                auto inlineEnd = jit.label();
                jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                    size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
                    mathIC->m_generatedCodeSize += size;
                });
#endif
            });

        setJSValue(patchpoint);
    }
2457
    template <typename Generator, typename Func1, typename Func2,
        typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>>
    void compileBinaryMathIC(BinaryArithProfile* arithProfile, Func1 repatchingFunction, Func2 nonRepatchingFunction)
    {
        // Binary analogue of compileUnaryMathIC: a patchpoint housing an
        // inline math IC whose slow path calls the repatching or
        // non-repatching runtime function.
        Node* node = m_node;

#if USE(BIGINT32)
        if (node->isBinaryUseKind(AnyBigIntUse)) {
            // FIXME: This is not supported by the IC yet.
            LValue left = lowJSValue(node->child1(), ManualOperandSpeculation);
            LValue right = lowJSValue(node->child2(), ManualOperandSpeculation);
            speculate(node, node->child1());
            speculate(node, node->child2());

            JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
            setJSValue(vmCall(pointerType(), nonRepatchingFunction, weakPointer(globalObject), left, right));
            return;
        }
#endif

        LValue left = lowJSValue(node->child1());
        LValue right = lowJSValue(node->child2());

        // Give the generator the abstract result types so it can specialize.
        SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
        SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(left);
        patchpoint->appendSomeRegister(right);
        // Keep the JSValue tag registers live for the IC's generated code.
        patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
        patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->numFPScratchRegisters = 2;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        // Captured by value below: the generator runs after this frame is gone.
        State* state = &m_ftlState;
        CodeOrigin semanticNodeOrigin = node->origin.semantic;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

#if ENABLE(MATH_IC_STATS)
                auto inlineStart = jit.label();
#endif

                Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
                JITBinaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile);
                // params[0] is the result register; params[1]/params[2] are
                // the left/right operands.
                mathIC->m_generator = Generator(leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.fpScratch(0),
                    params.fpScratch(1), params.gpScratch(0), InvalidFPRReg);

                bool shouldEmitProfiling = false;
                bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);

                if (generatedInline) {
                    ASSERT(!mathICGenerationState->slowPathJumps.empty());
                    auto done = jit.label();
                    // Emit the IC's slow path out-of-line, after the main body.
                    params.addLatePath([=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);
                        mathICGenerationState->slowPathJumps.link(&jit);
                        mathICGenerationState->slowPathStart = jit.label();
#if ENABLE(MATH_IC_STATS)
                        auto slowPathStart = jit.label();
#endif

                        if (mathICGenerationState->shouldSlowPathRepatch) {
                            // The repatching call also receives the IC pointer so
                            // the runtime can patch the inline code.
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, semanticNodeOrigin, exceptions.get(),
                                repatchingFunction, params[0].gpr(), jit.codeBlock()->globalObjectFor(semanticNodeOrigin), params[1].gpr(), params[2].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
                            mathICGenerationState->slowPathCall = call.call();
                        } else {
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, semanticNodeOrigin,
                                exceptions.get(), nonRepatchingFunction, params[0].gpr(), jit.codeBlock()->globalObjectFor(semanticNodeOrigin), params[1].gpr(), params[2].gpr());
                            mathICGenerationState->slowPathCall = call.call();
                        }
                        jit.jump().linkTo(done, &jit);

                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
                        });

#if ENABLE(MATH_IC_STATS)
                        auto slowPathEnd = jit.label();
                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
                            mathIC->m_generatedCodeSize += size;
                        });
#endif
                    });
                } else {
                    // No inline IC was generated; always call the
                    // non-repatching runtime function.
                    callOperation(
                        *state, params.unavailableRegisters(), jit, semanticNodeOrigin, exceptions.get(),
                        nonRepatchingFunction, params[0].gpr(), jit.codeBlock()->globalObjectFor(semanticNodeOrigin), params[1].gpr(), params[2].gpr());
                }

#if ENABLE(MATH_IC_STATS)
                auto inlineEnd = jit.label();
                jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                    size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
                    mathIC->m_generatedCodeSize += size;
                });
#endif
            });

        setJSValue(patchpoint);
    }
2568
2569 void compileStrCat()
2570 {
2571 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
2572
2573 LValue result;
2574 if (m_node->child3()) {
2575 result = vmCall(
2576 Int64, operationStrCat3, weakPointer(globalObject),
2577 lowJSValue(m_node->child1(), ManualOperandSpeculation),
2578 lowJSValue(m_node->child2(), ManualOperandSpeculation),
2579 lowJSValue(m_node->child3(), ManualOperandSpeculation));
2580 } else {
2581 result = vmCall(
2582 Int64, operationStrCat2, weakPointer(globalObject),
2583 lowJSValue(m_node->child1(), ManualOperandSpeculation),
2584 lowJSValue(m_node->child2(), ManualOperandSpeculation));
2585 }
2586 setJSValue(result);
2587 }
2588
2589 void compileArithAddOrSub()
2590 {
2591 bool isSub = m_node->op() == ArithSub;
2592 switch (m_node->binaryUseKind()) {
2593 case Int32Use: {
2594 LValue left = lowInt32(m_node->child1());
2595 LValue right = lowInt32(m_node->child2());
2596
2597 if (!shouldCheckOverflow(m_node->arithMode())) {
2598 setInt32(isSub ? m_out.sub(left, right) : m_out.add(left, right));
2599 break;
2600 }
2601
2602 CheckValue* result =
2603 isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
2604 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
2605 setInt32(result);
2606 break;
2607 }
2608
2609 case Int52RepUse: {
2610 if (!abstractValue(m_node->child1()).couldBeType(SpecNonInt32AsInt52)
2611 && !abstractValue(m_node->child2()).couldBeType(SpecNonInt32AsInt52)) {
2612 Int52Kind kind;
2613 LValue left = lowWhicheverInt52(m_node->child1(), kind);
2614 LValue right = lowInt52(m_node->child2(), kind);
2615 setInt52(isSub ? m_out.sub(left, right) : m_out.add(left, right), kind);
2616 break;
2617 }
2618
2619 LValue left = lowInt52(m_node->child1());
2620 LValue right = lowInt52(m_node->child2());
2621 CheckValue* result =
2622 isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
2623 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
2624 setInt52(result);
2625 break;
2626 }
2627
2628 case DoubleRepUse: {
2629 LValue C1 = lowDouble(m_node->child1());
2630 LValue C2 = lowDouble(m_node->child2());
2631
2632 setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2));
2633 break;
2634 }
2635
2636 case UntypedUse: {
2637 if (!isSub) {
2638 DFG_CRASH(m_graph, m_node, "Bad use kind");
2639 break;
2640 }
2641
2642 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_origin.semantic);
2643 BytecodeIndex bytecodeIndex = m_origin.semantic.bytecodeIndex();
2644 BinaryArithProfile* arithProfile = baselineCodeBlock->binaryArithProfileForBytecodeIndex(bytecodeIndex);
2645 auto repatchingFunction = operationValueSubOptimize;
2646 auto nonRepatchingFunction = operationValueSub;
2647 compileBinaryMathIC<JITSubGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
2648 break;
2649 }
2650
2651 default:
2652 DFG_CRASH(m_graph, m_node, "Bad use kind");
2653 break;
2654 }
2655 }
2656
2657 void compileArithClz32()
2658 {
2659 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
2660 if (m_node->child1().useKind() == Int32Use || m_node->child1().useKind() == KnownInt32Use) {
2661 LValue operand = lowInt32(m_node->child1());
2662 setInt32(m_out.ctlz32(operand));
2663 return;
2664 }
2665 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2666 LValue argument = lowJSValue(m_node->child1());
2667 LValue result = m_out.castToInt32(vmCall(Int64, operationArithClz32, weakPointer(globalObject), argument));
2668 setInt32(result);
2669 }
2670
    void compileArithMul()
    {
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue left = lowInt32(m_node->child1());
            LValue right = lowInt32(m_node->child2());

            LValue result;

            if (!shouldCheckOverflow(m_node->arithMode()))
                result = m_out.mul(left, right);
            else {
                // Checked multiply: overflow OSR-exits via the Overflow kind.
                CheckValue* speculation = m_out.speculateMul(left, right);
                blessSpeculation(speculation, Overflow, noValue(), nullptr, m_origin);
                result = speculation;
            }

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // An integer zero result hides -0 when either operand was
                // negative (e.g. -1 * 0); exit in that case.
                LBasicBlock slowCase = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.notZero32(result), usually(continuation), rarely(slowCase));

                LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int32Zero));
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int32Zero));
                m_out.jump(continuation);
                m_out.appendTo(continuation, lastNext);
            }

            setInt32(result);
            break;
        }

        case Int52RepUse: {
            // The operands are lowered in opposite Int52 kinds — presumably
            // one shifted and one unshifted so the product has the right
            // scale; confirm against the Int52Kind definitions.
            Int52Kind kind;
            LValue left = lowWhicheverInt52(m_node->child1(), kind);
            LValue right = lowInt52(m_node->child2(), opposite(kind));

            CheckValue* result = m_out.speculateMul(left, right);
            blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // Same -0 rule as the int32 case, on 64-bit values.
                LBasicBlock slowCase = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.notZero64(result), usually(continuation), rarely(slowCase));

                LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int64Zero));
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int64Zero));
                m_out.jump(continuation);
                m_out.appendTo(continuation, lastNext);
            }

            setInt52(result);
            break;
        }

        case DoubleRepUse: {
            setDouble(
                m_out.doubleMul(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2743
2744 void compileValueDiv()
2745 {
2746 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
2747 // FIXME: add a fast path for BigInt32 here
2748 // https://bugs.webkit.org/show_bug.cgi?id=211041
2749 if (m_node->isBinaryUseKind(HeapBigIntUse)) {
2750 LValue left = lowHeapBigInt(m_node->child1());
2751 LValue right = lowHeapBigInt(m_node->child2());
2752
2753 LValue result = vmCall(pointerType(), operationDivHeapBigInt, weakPointer(globalObject), left, right);
2754 setJSValue(result);
2755 return;
2756 }
2757
2758 emitBinarySnippet<JITDivGenerator, NeedScratchFPR>(operationValueDiv);
2759 }
2760
    void compileArithDiv()
    {
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue numerator = lowInt32(m_node->child1());
            LValue denominator = lowInt32(m_node->child2());

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // 0 / negative would produce -0 in double arithmetic; exit if
                // the numerator is zero and the denominator is negative.
                LBasicBlock zeroNumerator = m_out.newBlock();
                LBasicBlock numeratorContinuation = m_out.newBlock();

                m_out.branch(
                    m_out.isZero32(numerator),
                    rarely(zeroNumerator), usually(numeratorContinuation));

                LBasicBlock innerLastNext = m_out.appendTo(zeroNumerator, numeratorContinuation);

                speculate(
                    NegativeZero, noValue(), nullptr, m_out.lessThan(denominator, m_out.int32Zero));

                m_out.jump(numeratorContinuation);

                m_out.appendTo(numeratorContinuation, innerLastNext);
            }

            if (shouldCheckOverflow(m_node->arithMode())) {
                LBasicBlock unsafeDenominator = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                // (denominator + 1) >u 1 exactly when denominator is neither
                // 0 nor -1 — the two values that need extra checks.
                LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
                m_out.branch(
                    m_out.above(adjustedDenominator, m_out.int32One),
                    usually(continuation), rarely(unsafeDenominator));

                LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
                // INT32_MIN, spelled to avoid a literal-overflow warning.
                LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
                // Exit on division by zero and on INT32_MIN / -1 (overflow).
                speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
                speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                LValue result = m_out.div(numerator, denominator);
                // The division must be exact: exit if multiplying back does
                // not reproduce the numerator (i.e. there was a remainder).
                speculate(
                    Overflow, noValue(), nullptr,
                    m_out.notEqual(m_out.mul(result, denominator), numerator));
                setInt32(result);
            } else
                // chillDiv handles the 0 and INT32_MIN/-1 cases without
                // trapping (see B3's chill division semantics).
                setInt32(m_out.chillDiv(numerator, denominator));

            break;
        }

        case DoubleRepUse: {
            setDouble(m_out.doubleDiv(
                lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2824
2825 void compileValueMod()
2826 {
2827 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
2828 // FIXME: add a BigInt32 fast path here
2829 if (m_node->binaryUseKind() == HeapBigIntUse) {
2830 LValue left = lowHeapBigInt(m_node->child1());
2831 LValue right = lowHeapBigInt(m_node->child2());
2832
2833 LValue result = vmCall(pointerType(), operationModHeapBigInt, weakPointer(globalObject), left, right);
2834 setJSValue(result);
2835 return;
2836 }
2837
2838 DFG_ASSERT(m_graph, m_node, m_node->binaryUseKind() == UntypedUse || m_node->binaryUseKind() == AnyBigIntUse, m_node->binaryUseKind());
2839 LValue left = lowJSValue(m_node->child1(), ManualOperandSpeculation);
2840 LValue right = lowJSValue(m_node->child2(), ManualOperandSpeculation);
2841 speculate(m_node, m_node->child1());
2842 speculate(m_node, m_node->child2());
2843 LValue result = vmCall(Int64, operationValueMod, weakPointer(globalObject), left, right);
2844 setJSValue(result);
2845 }
2846
    void compileArithMod()
    {
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue numerator = lowInt32(m_node->child1());
            LValue denominator = lowInt32(m_node->child2());

            LValue remainder;
            if (shouldCheckOverflow(m_node->arithMode())) {
                LBasicBlock unsafeDenominator = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                // (denominator + 1) >u 1 exactly when denominator is neither
                // 0 nor -1 — the two values that need extra checks.
                LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
                m_out.branch(
                    m_out.above(adjustedDenominator, m_out.int32One),
                    usually(continuation), rarely(unsafeDenominator));

                LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
                // INT32_MIN, spelled to avoid a literal-overflow warning.
                LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
                // Exit on modulo by zero and on INT32_MIN % -1 (overflow in
                // the underlying division).
                speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
                speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                LValue result = m_out.mod(numerator, denominator);
                remainder = result;
            } else
                // chillMod handles the 0 and INT32_MIN/-1 cases without
                // trapping (see B3's chill division semantics).
                remainder = m_out.chillMod(numerator, denominator);

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // A zero remainder from a negative numerator is really -0;
                // exit in that case.
                LBasicBlock negativeNumerator = m_out.newBlock();
                LBasicBlock numeratorContinuation = m_out.newBlock();

                m_out.branch(
                    m_out.lessThan(numerator, m_out.int32Zero),
                    unsure(negativeNumerator), unsure(numeratorContinuation));

                LBasicBlock innerLastNext = m_out.appendTo(negativeNumerator, numeratorContinuation);

                speculate(NegativeZero, noValue(), nullptr, m_out.isZero32(remainder));

                m_out.jump(numeratorContinuation);

                m_out.appendTo(numeratorContinuation, innerLastNext);
            }

            setInt32(remainder);
            break;
        }

        case DoubleRepUse: {
            setDouble(
                m_out.doubleMod(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2908
    void compileArithMinOrMax()
    {
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            // Integer min/max is a simple compare-and-select.
            LValue left = lowInt32(m_node->child1());
            LValue right = lowInt32(m_node->child2());

            setInt32(
                m_out.select(
                    m_node->op() == ArithMin
                        ? m_out.lessThan(left, right)
                        : m_out.lessThan(right, left),
                    left, right));
            break;
        }

        case DoubleRepUse: {
            // Doubles need extra care for ±0 and NaN, so the lowering splits
            // into three outcomes: left strictly wins, operands equal, and
            // right-wins-or-unordered.
            LValue left = lowDouble(m_node->child1());
            LValue right = lowDouble(m_node->child2());

            LBasicBlock notLessThan = m_out.newBlock();
            LBasicBlock isEqual = m_out.newBlock();
            LBasicBlock notEqual = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            Vector<ValueFromBlock, 2> results;

            // If left strictly wins the comparison, the result is left.
            results.append(m_out.anchor(left));
            m_out.branch(
                m_node->op() == ArithMin
                    ? m_out.doubleLessThan(left, right)
                    : m_out.doubleGreaterThan(left, right),
                unsure(continuation), unsure(notLessThan));

            // The spec for Math.min and Math.max states that +0 is considered to be larger than -0.
            LBasicBlock lastNext = m_out.appendTo(notLessThan, isEqual);
            m_out.branch(
                m_out.doubleEqual(left, right),
                rarely(isEqual), usually(notEqual));

            // Equal operands: combine the operands so min yields -0 if either
            // is -0 and max yields +0 unless both are -0 — presumably via the
            // values' bit patterns (OR sets the sign bit, AND clears it).
            lastNext = m_out.appendTo(isEqual, notEqual);
            results.append(m_out.anchor(
                m_node->op() == ArithMin
                    ? m_out.bitOr(left, right)
                    : m_out.bitAnd(left, right)));
            m_out.jump(continuation);

            // Not less/greater and not equal: either right strictly wins the
            // reversed comparison, or at least one operand is NaN → PNaN.
            lastNext = m_out.appendTo(notEqual, continuation);
            results.append(
                m_out.anchor(
                    m_out.select(
                        m_node->op() == ArithMin
                            ? m_out.doubleGreaterThan(left, right)
                            : m_out.doubleLessThan(left, right),
                        right, m_out.constDouble(PNaN))));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setDouble(m_out.phi(Double, results));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2976
    void compileArithAbs()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        switch (m_node->child1().useKind()) {
        case Int32Use: {
            LValue value = lowInt32(m_node->child1());

            // Branchless abs: mask is 0 for non-negative values and -1 for
            // negative ones, so (value + mask) ^ mask negates negatives.
            LValue mask = m_out.aShr(value, m_out.constInt32(31));
            LValue result = m_out.bitXor(mask, m_out.add(mask, value));

            // abs(INT32_MIN) wraps back to a negative value; catch it here.
            if (shouldCheckOverflow(m_node->arithMode()))
                speculate(Overflow, noValue(), nullptr, m_out.lessThan(result, m_out.int32Zero));

            setInt32(result);
            break;
        }

        case DoubleRepUse: {
            setDouble(m_out.doubleAbs(lowDouble(m_node->child1())));
            break;
        }

        default: {
            // Untyped operand: call the runtime, which returns a double.
            DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
            LValue argument = lowJSValue(m_node->child1());
            LValue result = vmCall(Double, operationArithAbs, weakPointer(globalObject), argument);
            setDouble(result);
            break;
        }
        }
    }
3008
3009 void compileArithUnary()
3010 {
3011 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
3012 if (m_node->child1().useKind() == DoubleRepUse) {
3013 setDouble(m_out.doubleUnary(m_node->arithUnaryType(), lowDouble(m_node->child1())));
3014 return;
3015 }
3016 LValue argument = lowJSValue(m_node->child1());
3017 LValue result = vmCall(Double, DFG::arithUnaryOperation(m_node->arithUnaryType()), weakPointer(globalObject), argument);
3018 setDouble(result);
3019 }
3020
    // Lowers ValuePow. HeapBigInt operands call the BigInt pow operation
    // directly; otherwise (Untyped or AnyBigInt) we lower with manual operand
    // speculation and call the generic operationValuePow.
    void compileValuePow()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        // FIXME: maybe add a fast path for BigInt32 here
        if (m_node->isBinaryUseKind(HeapBigIntUse)) {
            LValue base = lowHeapBigInt(m_node->child1());
            LValue exponent = lowHeapBigInt(m_node->child2());

            LValue result = vmCall(pointerType(), operationPowHeapBigInt, weakPointer(globalObject), base, exponent);
            setJSValue(result);
            return;
        }

        ASSERT(m_node->isBinaryUseKind(UntypedUse) || m_node->isBinaryUseKind(AnyBigIntUse));
        LValue base = lowJSValue(m_node->child1(), ManualOperandSpeculation);
        LValue exponent = lowJSValue(m_node->child2(), ManualOperandSpeculation);
        // ManualOperandSpeculation above means we must emit the type checks ourselves.
        speculate(m_node, m_node->child1());
        speculate(m_node, m_node->child2());
        LValue result = vmCall(Int64, operationValuePow, weakPointer(globalObject), base, exponent);
        setJSValue(result);
    }
3042
    // Lowers ArithPow (double result). If the exponent is statically Int32Use
    // we can use powi directly. Otherwise we emit an inline cascade that
    // handles the JS-mandated special cases (NaN exponent, |base| == 1 with
    // infinite exponent, exponent == +/-0.5) before falling back to std::pow.
    void compileArithPow()
    {
        if (m_node->child2().useKind() == Int32Use)
            setDouble(m_out.doublePowi(lowDouble(m_node->child1()), lowInt32(m_node->child2())));
        else {
            LValue base = lowDouble(m_node->child1());
            LValue exponent = lowDouble(m_node->child2());

            LBasicBlock integerExponentIsSmallBlock = m_out.newBlock();
            LBasicBlock integerExponentPowBlock = m_out.newBlock();
            LBasicBlock doubleExponentPowBlockEntry = m_out.newBlock();
            LBasicBlock nanExceptionBaseIsOne = m_out.newBlock();
            LBasicBlock nanExceptionExponentIsInfinity = m_out.newBlock();
            LBasicBlock testExponentIsOneHalf = m_out.newBlock();
            LBasicBlock handleBaseZeroExponentIsOneHalf = m_out.newBlock();
            LBasicBlock handleInfinityForExponentIsOneHalf = m_out.newBlock();
            LBasicBlock exponentIsOneHalfNormal = m_out.newBlock();
            LBasicBlock exponentIsOneHalfInfinity = m_out.newBlock();
            LBasicBlock testExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock testBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock handleBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock handleInfinityForExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock exponentIsNegativeOneHalfNormal = m_out.newBlock();
            LBasicBlock exponentIsNegativeOneHalfInfinity = m_out.newBlock();
            LBasicBlock powBlock = m_out.newBlock();
            LBasicBlock nanExceptionResultIsNaN = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Fast path: exponent is exactly representable as an integer.
            LValue integerExponent = m_out.doubleToInt(exponent);
            LValue integerExponentConvertedToDouble = m_out.intToDouble(integerExponent);
            LValue exponentIsInteger = m_out.doubleEqual(exponent, integerExponentConvertedToDouble);
            m_out.branch(exponentIsInteger, unsure(integerExponentIsSmallBlock), unsure(doubleExponentPowBlockEntry));

            // Unsigned compare, so negative integer exponents also fail this
            // test and take the general path.
            LBasicBlock lastNext = m_out.appendTo(integerExponentIsSmallBlock, integerExponentPowBlock);
            LValue integerExponentBelowMax = m_out.belowOrEqual(integerExponent, m_out.constInt32(maxExponentForIntegerMathPow));
            m_out.branch(integerExponentBelowMax, usually(integerExponentPowBlock), rarely(doubleExponentPowBlockEntry));

            m_out.appendTo(integerExponentPowBlock, doubleExponentPowBlockEntry);
            ValueFromBlock powDoubleIntResult = m_out.anchor(m_out.doublePowi(base, integerExponent));
            m_out.jump(continuation);

            // If y is NaN, the result is NaN.
            m_out.appendTo(doubleExponentPowBlockEntry, nanExceptionBaseIsOne);
            LValue exponentIsNaN;
            if (provenType(m_node->child2()) & SpecDoubleNaN)
                exponentIsNaN = m_out.doubleNotEqualOrUnordered(exponent, exponent);
            else
                exponentIsNaN = m_out.booleanFalse;
            m_out.branch(exponentIsNaN, rarely(nanExceptionResultIsNaN), usually(nanExceptionBaseIsOne));

            // If abs(x) is 1 and y is +infinity, the result is NaN.
            // If abs(x) is 1 and y is -infinity, the result is NaN.

            // Test if base == 1.
            m_out.appendTo(nanExceptionBaseIsOne, nanExceptionExponentIsInfinity);
            LValue absoluteBase = m_out.doubleAbs(base);
            LValue absoluteBaseIsOne = m_out.doubleEqual(absoluteBase, m_out.constDouble(1));
            m_out.branch(absoluteBaseIsOne, rarely(nanExceptionExponentIsInfinity), usually(testExponentIsOneHalf));

            // Test if abs(y) == Infinity.
            m_out.appendTo(nanExceptionExponentIsInfinity, testExponentIsOneHalf);
            LValue absoluteExponent = m_out.doubleAbs(exponent);
            LValue absoluteExponentIsInfinity = m_out.doubleEqual(absoluteExponent, m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.branch(absoluteExponentIsInfinity, rarely(nanExceptionResultIsNaN), usually(testExponentIsOneHalf));

            // If y == 0.5 or y == -0.5, handle it through SQRT.
            // We have to be careful with -0 and -Infinity.

            // Test if y == 0.5
            m_out.appendTo(testExponentIsOneHalf, handleBaseZeroExponentIsOneHalf);
            LValue exponentIsOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(0.5));
            m_out.branch(exponentIsOneHalf, rarely(handleBaseZeroExponentIsOneHalf), usually(testExponentIsNegativeOneHalf));

            // Handle x == -0.
            m_out.appendTo(handleBaseZeroExponentIsOneHalf, handleInfinityForExponentIsOneHalf);
            LValue baseIsZeroExponentIsOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
            ValueFromBlock zeroResultExponentIsOneHalf = m_out.anchor(m_out.doubleZero);
            m_out.branch(baseIsZeroExponentIsOneHalf, rarely(continuation), usually(handleInfinityForExponentIsOneHalf));

            // Test if abs(x) == Infinity.
            m_out.appendTo(handleInfinityForExponentIsOneHalf, exponentIsOneHalfNormal);
            LValue absoluteBaseIsInfinityOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.branch(absoluteBaseIsInfinityOneHalf, rarely(exponentIsOneHalfInfinity), usually(exponentIsOneHalfNormal));

            // The exponent is 0.5, the base is finite or NaN, we can use SQRT.
            m_out.appendTo(exponentIsOneHalfNormal, exponentIsOneHalfInfinity);
            ValueFromBlock sqrtResult = m_out.anchor(m_out.doubleSqrt(base));
            m_out.jump(continuation);

            // The exponent is 0.5, the base is infinite, the result is always infinite.
            m_out.appendTo(exponentIsOneHalfInfinity, testExponentIsNegativeOneHalf);
            ValueFromBlock sqrtInfinityResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.jump(continuation);

            // Test if y == -0.5
            m_out.appendTo(testExponentIsNegativeOneHalf, testBaseZeroExponentIsNegativeOneHalf);
            LValue exponentIsNegativeOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(-0.5));
            m_out.branch(exponentIsNegativeOneHalf, rarely(testBaseZeroExponentIsNegativeOneHalf), usually(powBlock));

            // Handle x == -0.
            m_out.appendTo(testBaseZeroExponentIsNegativeOneHalf, handleBaseZeroExponentIsNegativeOneHalf);
            LValue baseIsZeroExponentIsNegativeOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
            m_out.branch(baseIsZeroExponentIsNegativeOneHalf, rarely(handleBaseZeroExponentIsNegativeOneHalf), usually(handleInfinityForExponentIsNegativeOneHalf));

            // x^-0.5 with x == 0 is 1/0 == Infinity.
            m_out.appendTo(handleBaseZeroExponentIsNegativeOneHalf, handleInfinityForExponentIsNegativeOneHalf);
            ValueFromBlock oneOverSqrtZeroResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.jump(continuation);

            // Test if abs(x) == Infinity.
            m_out.appendTo(handleInfinityForExponentIsNegativeOneHalf, exponentIsNegativeOneHalfNormal);
            LValue absoluteBaseIsInfinityNegativeOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.branch(absoluteBaseIsInfinityNegativeOneHalf, rarely(exponentIsNegativeOneHalfInfinity), usually(exponentIsNegativeOneHalfNormal));

            // The exponent is -0.5, the base is finite or NaN, we can use 1/SQRT.
            m_out.appendTo(exponentIsNegativeOneHalfNormal, exponentIsNegativeOneHalfInfinity);
            LValue sqrtBase = m_out.doubleSqrt(base);
            ValueFromBlock oneOverSqrtResult = m_out.anchor(m_out.div(m_out.constDouble(1.), sqrtBase));
            m_out.jump(continuation);

            // The exponent is -0.5, the base is infinite, the result is always zero.
            m_out.appendTo(exponentIsNegativeOneHalfInfinity, powBlock);
            ValueFromBlock oneOverSqrtInfinityResult = m_out.anchor(m_out.doubleZero);
            m_out.jump(continuation);

            // General case: defer to std::pow.
            m_out.appendTo(powBlock, nanExceptionResultIsNaN);
            ValueFromBlock powResult = m_out.anchor(m_out.doubleStdPow(base, exponent));
            m_out.jump(continuation);

            m_out.appendTo(nanExceptionResultIsNaN, continuation);
            ValueFromBlock pureNan = m_out.anchor(m_out.constDouble(PNaN));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setDouble(m_out.phi(Double, powDoubleIntResult, zeroResultExponentIsOneHalf, sqrtResult, sqrtInfinityResult, oneOverSqrtZeroResult, oneOverSqrtResult, oneOverSqrtInfinityResult, powResult, pureNan));
        }
    }
3179
    // Lowers ArithRandom by inlining the global object's WeakRandom state
    // update (mirroring WeakRandom::advance()) and converting the low 53 bits
    // of the 64-bit result into a double in [0, 1). The load/store order of
    // the low/high state words must match the interpreter's implementation.
    void compileArithRandom()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

        // Inlined WeakRandom::advance().
        // uint64_t x = m_low;
        void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
        LValue low = m_out.load64(m_out.absolute(lowAddress));
        // uint64_t y = m_high;
        void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();
        LValue high = m_out.load64(m_out.absolute(highAddress));
        // m_low = y;
        m_out.store64(high, m_out.absolute(lowAddress));

        // x ^= x << 23;
        LValue phase1 = m_out.bitXor(m_out.shl(low, m_out.constInt64(23)), low);

        // x ^= x >> 17;
        LValue phase2 = m_out.bitXor(m_out.lShr(phase1, m_out.constInt64(17)), phase1);

        // x ^= y ^ (y >> 26);
        LValue phase3 = m_out.bitXor(m_out.bitXor(high, m_out.lShr(high, m_out.constInt64(26))), phase2);

        // m_high = x;
        m_out.store64(phase3, m_out.absolute(highAddress));

        // return x + y;
        LValue random64 = m_out.add(phase3, high);

        // Extract random 53bit. [0, 53] bit is safe integer number ranges in double representation.
        LValue random53 = m_out.bitAnd(random64, m_out.constInt64((1ULL << 53) - 1));

        LValue double53Integer = m_out.intToDouble(random53);

        // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`.
        // In latter case, `1.0 / (1 << 53)` will become a double value represented as (mantissa = 0 & exp = 970, it means 1e-(2**54)).
        static constexpr double scale = 1.0 / (1ULL << 53);

        // Multiplying 1e-(2**54) with the double integer does not change anything of the mantissa part of the double integer.
        // It just reduces the exp part of the given 53bit double integer.
        // (Except for 0.0. This is specially handled and in this case, exp just becomes 0.)
        // Now we get 53bit precision random double value in [0, 1).
        LValue result = m_out.doubleMul(double53Integer, m_out.constDouble(scale));

        setDouble(result);
    }
3226
    // Lowers ArithRound (JS Math.round: halfway cases round toward +Infinity).
    // DoubleRep inputs are rounded inline; other use kinds (asserted Untyped)
    // call operationArithRound.
    void compileArithRound()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        if (m_node->child1().useKind() == DoubleRepUse) {
            LValue result = nullptr;
            if (producesInteger(m_node->arithRoundingMode()) && !shouldCheckNegativeZero(m_node->arithRoundingMode())) {
                // Fast path: floor(x + 0.5) is enough when we only need the
                // integer result and never have to distinguish -0.
                LValue value = lowDouble(m_node->child1());
                result = m_out.doubleFloor(m_out.doubleAdd(value, m_out.constDouble(0.5)));
            } else {
                LBasicBlock shouldRoundDown = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                // General path: start from ceil(x); if ceil(x) - 0.5 > x the
                // fractional part was below one half, so subtract 1. The
                // "orUnordered" makes NaN take the round-down path (NaN - 1
                // is still NaN, so the result stays correct).
                LValue value = lowDouble(m_node->child1());
                LValue integerValue = m_out.doubleCeil(value);
                ValueFromBlock integerValueResult = m_out.anchor(integerValue);

                LValue ceilMinusHalf = m_out.doubleSub(integerValue, m_out.constDouble(0.5));
                m_out.branch(m_out.doubleGreaterThanOrUnordered(ceilMinusHalf, value), unsure(shouldRoundDown), unsure(continuation));

                LBasicBlock lastNext = m_out.appendTo(shouldRoundDown, continuation);
                LValue integerValueRoundedDown = m_out.doubleSub(integerValue, m_out.constDouble(1));
                ValueFromBlock integerValueRoundedDownResult = m_out.anchor(integerValueRoundedDown);
                m_out.jump(continuation);
                m_out.appendTo(continuation, lastNext);

                result = m_out.phi(Double, integerValueResult, integerValueRoundedDownResult);
            }

            if (producesInteger(m_node->arithRoundingMode())) {
                LValue integerValue = convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode()));
                setInt32(integerValue);
            } else
                setDouble(result);
            return;
        }

        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
        LValue argument = lowJSValue(m_node->child1());
        setJSValue(vmCall(Int64, operationArithRound, weakPointer(globalObject), argument));
    }
3267
    // Lowers ArithFloor: B3 floor for DoubleRep inputs (converted to int32
    // when an integer result is requested), VM call otherwise.
    void compileArithFloor()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        if (m_node->child1().useKind() == DoubleRepUse) {
            LValue value = lowDouble(m_node->child1());
            LValue integerValue = m_out.doubleFloor(value);
            if (producesInteger(m_node->arithRoundingMode()))
                setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
            else
                setDouble(integerValue);
            return;
        }
        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
        LValue argument = lowJSValue(m_node->child1());
        setJSValue(vmCall(Int64, operationArithFloor, weakPointer(globalObject), argument));
    }
3284
    // Lowers ArithCeil: B3 ceil for DoubleRep inputs (converted to int32 when
    // an integer result is requested), VM call otherwise.
    void compileArithCeil()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        if (m_node->child1().useKind() == DoubleRepUse) {
            LValue value = lowDouble(m_node->child1());
            LValue integerValue = m_out.doubleCeil(value);
            if (producesInteger(m_node->arithRoundingMode()))
                setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
            else
                setDouble(integerValue);
            return;
        }
        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
        LValue argument = lowJSValue(m_node->child1());
        setJSValue(vmCall(Int64, operationArithCeil, weakPointer(globalObject), argument));
    }
3301
    // Lowers ArithTrunc: B3 trunc for DoubleRep inputs (converted to int32
    // when an integer result is requested), VM call otherwise.
    void compileArithTrunc()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        if (m_node->child1().useKind() == DoubleRepUse) {
            LValue value = lowDouble(m_node->child1());
            LValue result = m_out.doubleTrunc(value);
            if (producesInteger(m_node->arithRoundingMode()))
                setInt32(convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode())));
            else
                setDouble(result);
            return;
        }
        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
        LValue argument = lowJSValue(m_node->child1());
        setJSValue(vmCall(Int64, operationArithTrunc, weakPointer(globalObject), argument));
    }
3318
3319 void compileArithSqrt()
3320 {
3321 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
3322 if (m_node->child1().useKind() == DoubleRepUse) {
3323 setDouble(m_out.doubleSqrt(lowDouble(m_node->child1())));
3324 return;
3325 }
3326 LValue argument = lowJSValue(m_node->child1());
3327 LValue result = vmCall(Double, operationArithSqrt, weakPointer(globalObject), argument);
3328 setDouble(result);
3329 }
3330
3331 void compileArithFRound()
3332 {
3333 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
3334 if (m_node->child1().useKind() == DoubleRepUse) {
3335 setDouble(m_out.fround(lowDouble(m_node->child1())));
3336 return;
3337 }
3338 LValue argument = lowJSValue(m_node->child1());
3339 LValue result = vmCall(Double, operationArithFRound, weakPointer(globalObject), argument);
3340 setDouble(result);
3341 }
3342
3343 void compileIncOrDec()
3344 {
3345 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
3346 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
3347 LValue operand = lowJSValue(m_node->child1());
3348 LValue result = vmCall(Int64, m_node->op() == Inc ? operationInc : operationDec, weakPointer(globalObject), operand);
3349 setJSValue(result);
3350 }
3351
    // Lowers ValueNegate on an Untyped operand by compiling through the unary
    // math inline cache, using the baseline code block's arith profile to
    // drive repatching between the optimizing and generic slow paths.
    void compileValueNegate()
    {
        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
        CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_origin.semantic);
        BytecodeIndex bytecodeIndex = m_origin.semantic.bytecodeIndex();
        UnaryArithProfile* arithProfile = baselineCodeBlock->unaryArithProfileForBytecodeIndex(bytecodeIndex);
        auto repatchingFunction = operationArithNegateOptimize;
        auto nonRepatchingFunction = operationArithNegate;
        compileUnaryMathIC<JITNegGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
    }
3362
    // Lowers ArithNegate for Int32, Int52, and DoubleRep operands, emitting
    // overflow and negative-zero speculations as required by the arith mode.
    void compileArithNegate()
    {
        switch (m_node->child1().useKind()) {
        case Int32Use: {
            LValue value = lowInt32(m_node->child1());

            LValue result;
            if (!shouldCheckOverflow(m_node->arithMode()))
                result = m_out.neg(value);
            else if (!shouldCheckNegativeZero(m_node->arithMode())) {
                // Overflow check only: 0 - value traps on INT32_MIN.
                CheckValue* check = m_out.speculateSub(m_out.int32Zero, value);
                blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
                result = check;
            } else {
                // (value & 0x7fffffff) == 0 holds exactly for 0 and INT32_MIN,
                // covering both the negative-zero and the overflow case with
                // one test; after that a plain negation is safe.
                speculate(Overflow, noValue(), nullptr, m_out.testIsZero32(value, m_out.constInt32(0x7fffffff)));
                result = m_out.neg(value);
            }

            setInt32(result);
            break;
        }

        case Int52RepUse: {
            if (!abstractValue(m_node->child1()).couldBeType(SpecNonInt32AsInt52)) {
                // Fits in int32, so negation cannot overflow the int52 range.
                Int52Kind kind;
                LValue value = lowWhicheverInt52(m_node->child1(), kind);
                LValue result = m_out.neg(value);
                // A zero result means the input was zero, i.e. the true result
                // is -0, which the int52 representation cannot express.
                if (shouldCheckNegativeZero(m_node->arithMode()))
                    speculate(NegativeZero, noValue(), nullptr, m_out.isZero64(result));
                setInt52(result, kind);
                break;
            }

            LValue value = lowInt52(m_node->child1());
            CheckValue* result = m_out.speculateSub(m_out.int64Zero, value);
            blessSpeculation(result, Int52Overflow, noValue(), nullptr, m_origin);
            if (shouldCheckNegativeZero(m_node->arithMode()))
                speculate(NegativeZero, noValue(), nullptr, m_out.isZero64(result));
            setInt52(result);
            break;
        }

        case DoubleRepUse: {
            setDouble(m_out.doubleNeg(lowDouble(m_node->child1())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
3415
    // Lowers ValueBitNot. BigInt32 operands are handled inline with a bit
    // trick; HeapBigInt calls a dedicated operation; everything else
    // (Untyped/AnyBigInt) speculates manually and calls the generic operation.
    void compileValueBitNot()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

#if USE(BIGINT32)
        if (m_node->child1().useKind() == BigInt32Use) {
            LValue operand = lowBigInt32(m_node->child1());
            // The following trick relies on details of the representation of BigInt32, and will have to be updated if we move bits around.
            static_assert(JSValue::BigInt32Tag == 0x12);
            static_assert(JSValue::BigInt32Mask == static_cast<int64_t>(0xfffe000000000012));
            // XOR-ing all 32 payload bits (bits 16-47) computes the bitwise
            // NOT of the int32 payload while leaving the tag bits untouched,
            // so the result is still a correctly boxed BigInt32.
            uint64_t maskForBigInt32Bits = 0x0000ffffffff0000;
            LValue result = m_out.bitXor(operand, m_out.constInt64(maskForBigInt32Bits));
            setJSValue(result);
            return;
        }
#endif

        if (m_node->child1().useKind() == HeapBigIntUse) {
            LValue operand = lowHeapBigInt(m_node->child1());
            LValue result = vmCall(pointerType(), operationBitNotHeapBigInt, weakPointer(globalObject), operand);
            setJSValue(result);
            return;
        }

        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse || m_node->child1().useKind() == AnyBigIntUse);
        LValue operand = lowJSValue(m_node->child1(), ManualOperandSpeculation);
        // ManualOperandSpeculation above means we must emit the type check ourselves.
        speculate(m_node, m_node->child1());
        LValue result = vmCall(Int64, operationValueBitNot, weakPointer(globalObject), operand);
        setJSValue(result);
    }
3446
3447 void compileArithBitNot()
3448 {
3449 setInt32(m_out.bitNot(lowInt32(m_node->child1())));
3450 }
3451
    // Lowers ValueBitAnd: inline AND for BigInt32, dedicated operation for
    // HeapBigInt, and the generic binary-op snippet otherwise.
    void compileValueBitAnd()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

#if USE(BIGINT32)
        if (m_node->isBinaryUseKind(BigInt32Use)) {
            LValue left = lowBigInt32(m_node->child1());
            LValue right = lowBigInt32(m_node->child2());
            // No need to unbox, since the tagging is not affected by bitAnd
            // (both operands carry the same tag bits, and AND preserves them).
            LValue result = m_out.bitAnd(left, right);
            setJSValue(result);
            return;
        }
#endif

        if (m_node->isBinaryUseKind(HeapBigIntUse)) {
            LValue left = lowHeapBigInt(m_node->child1());
            LValue right = lowHeapBigInt(m_node->child2());

            LValue result = vmCall(pointerType(), operationBitAndHeapBigInt, weakPointer(globalObject), left, right);
            setJSValue(result);
            return;
        }

        emitBinaryBitOpSnippet<JITBitAndGenerator>(operationValueBitAnd);
    }
3478
3479 void compileArithBitAnd()
3480 {
3481 setInt32(m_out.bitAnd(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
3482 }
3483
    // Lowers ValueBitOr: inline OR for BigInt32, dedicated operation for
    // HeapBigInt, and the generic binary-op snippet otherwise.
    void compileValueBitOr()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

#if USE(BIGINT32)
        if (m_node->isBinaryUseKind(BigInt32Use)) {
            LValue left = lowBigInt32(m_node->child1());
            LValue right = lowBigInt32(m_node->child2());
            // No need to unbox, since the tagging is not affected by bitOr
            // (both operands carry the same tag bits, and OR preserves them).
            LValue result = m_out.bitOr(left, right);
            setJSValue(result);
            return;
        }
#endif

        if (m_node->isBinaryUseKind(HeapBigIntUse)) {
            LValue left = lowHeapBigInt(m_node->child1());
            LValue right = lowHeapBigInt(m_node->child2());

            LValue result = vmCall(pointerType(), operationBitOrHeapBigInt, weakPointer(globalObject), left, right);
            setJSValue(result);
            return;
        }

        emitBinaryBitOpSnippet<JITBitOrGenerator>(operationValueBitOr);
    }
3510
3511 void compileArithBitOr()
3512 {
3513 setInt32(m_out.bitOr(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
3514 }
3515
    // Lowers ValueBitXor: inline XOR for BigInt32 (with tag restoration),
    // dedicated operation for HeapBigInt, generic snippet otherwise.
    void compileValueBitXor()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

#if USE(BIGINT32)
        if (m_node->isBinaryUseKind(BigInt32Use)) {
            LValue left = lowBigInt32(m_node->child1());
            LValue right = lowBigInt32(m_node->child2());
            // XOR of two identically tagged values cancels the tag bits, so
            // the BigInt32 tag must be OR-ed back in afterwards.
            LValue resultMissingTag = m_out.bitXor(left, right);
            LValue result = m_out.bitOr(resultMissingTag, m_out.constInt64(JSValue::BigInt32Tag));
            setJSValue(result);
            return;
        }
#endif

        if (m_node->isBinaryUseKind(HeapBigIntUse)) {
            LValue left = lowHeapBigInt(m_node->child1());
            LValue right = lowHeapBigInt(m_node->child2());

            LValue result = vmCall(pointerType(), operationBitXorHeapBigInt, weakPointer(globalObject), left, right);
            setJSValue(result);
            return;
        }

        emitBinaryBitOpSnippet<JITBitXorGenerator>(operationValueBitXor);
    }
3542
3543 void compileArithBitXor()
3544 {
3545 setInt32(m_out.bitXor(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
3546 }
3547
    // Lowers ValueBitRShift. BigInt variants go through VM operations (see
    // FIXME below for why there is no inline BigInt32 path); everything else
    // uses the signed right-shift snippet.
    void compileValueBitRShift()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

#if USE(BIGINT32)
        if (m_node->isBinaryUseKind(AnyBigIntUse) || m_node->isBinaryUseKind(BigInt32Use)) {
            // FIXME: do something smarter here
            // Things are a bit tricky because a right-shift by a negative number is a left-shift for BigInts.
            // So even a right shift can overflow.
            // https://bugs.webkit.org/show_bug.cgi?id=210847

            LValue left = lowJSValue(m_node->child1(), ManualOperandSpeculation);
            LValue right = lowJSValue(m_node->child2(), ManualOperandSpeculation);
            // ManualOperandSpeculation above means we must emit the type checks ourselves.
            speculate(m_node, m_node->child1());
            speculate(m_node, m_node->child2());

            LValue result = vmCall(pointerType(), operationValueBitRShift, weakPointer(globalObject), left, right);
            setJSValue(result);
            return;
        }
#endif // USE(BIGINT32)

        if (m_node->isBinaryUseKind(HeapBigIntUse)) {
            LValue left = lowHeapBigInt(m_node->child1());
            LValue right = lowHeapBigInt(m_node->child2());

            LValue result = vmCall(pointerType(), operationBitRShiftHeapBigInt, weakPointer(globalObject), left, right);
            setJSValue(result);
            return;
        }

        emitRightShiftSnippet(JITRightShiftGenerator::SignedShift);
    }
3581
3582 void compileArithBitRShift()
3583 {
3584 setInt32(m_out.aShr(
3585 lowInt32(m_node->child1()),
3586 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31)))); // FIXME: I don't think that the BitAnd is useful, it is included in the semantics of shift in B3
3587 }
3588
3589 void compileArithBitLShift()
3590 {
3591 setInt32(m_out.shl(
3592 lowInt32(m_node->child1()),
3593 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31)))); // FIXME: I don't think that the BitAnd is useful, it is included in the semantics of shift in B3
3594 }
3595
    // Lowers ValueBitLShift: dedicated operation for HeapBigInt, generic
    // left-shift snippet for the remaining (asserted) use kinds.
    void compileValueBitLShift()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        // FIXME: consider adding a fast path for BigInt32 here.
        if (m_node->isBinaryUseKind(HeapBigIntUse)) {
            LValue left = lowHeapBigInt(m_node->child1());
            LValue right = lowHeapBigInt(m_node->child2());

            LValue result = vmCall(pointerType(), operationBitLShiftHeapBigInt, weakPointer(globalObject), left, right);
            setJSValue(result);
            return;
        }

        DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse) || m_node->isBinaryUseKind(AnyBigIntUse) || m_node->isBinaryUseKind(BigInt32Use));
        emitBinaryBitOpSnippet<JITLeftShiftGenerator>(operationValueBitLShift);
    }
3612
3613 void compileBitURShift()
3614 {
3615 if (m_node->isBinaryUseKind(UntypedUse)) {
3616 emitRightShiftSnippet(JITRightShiftGenerator::UnsignedShift);
3617 return;
3618 }
3619 setInt32(m_out.lShr(
3620 lowInt32(m_node->child1()),
3621 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31)))); // FIXME: I don't think that the BitAnd is useful, it is included in the semantics of shift in B3
3622 }
3623
    // Lowers UInt32ToNumber: reinterpret an int32 value as unsigned.
    void compileUInt32ToNumber()
    {
        LValue value = lowInt32(m_node->child1());

        if (doesOverflow(m_node->arithMode())) {
            // Allowed to exceed int32 range: zero-extend into a strict Int52.
            setStrictInt52(m_out.zeroExtPtr(value));
            return;
        }

        // Otherwise the unsigned value must fit in int32; a set sign bit
        // means it doesn't, so we OSR-exit.
        speculate(Overflow, noValue(), nullptr, m_out.lessThan(value, m_out.int32Zero));
        setInt32(value);
    }
3636
    // Lowers CheckStructure: speculate that the child's structure ID is in
    // the node's structure set. For CellOrOtherUse, non-cell values only need
    // to pass an is-other type check; cells get the structure check.
    void compileCheckStructure()
    {
        ExitKind exitKind;
        // A constant child means the cache keyed on this constant went bad.
        if (m_node->child1()->hasConstant())
            exitKind = BadConstantCache;
        else
            exitKind = BadCache;

        switch (m_node->child1().useKind()) {
        case CellUse:
        case KnownCellUse: {
            LValue cell = lowCell(m_node->child1());

            checkStructure(
                m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
                exitKind, m_node->structureSet(),
                [&] (RegisteredStructure structure) {
                    return weakStructureID(structure);
                });
            return;
        }

        case CellOrOtherUse: {
            LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
            checkStructure(
                m_out.load32(value, m_heaps.JSCell_structureID), jsValueValue(value),
                exitKind, m_node->structureSet(),
                [&] (RegisteredStructure structure) {
                    return weakStructureID(structure);
                });
            m_out.jump(continuation);

            // Non-cell path: only verify the value really is "other"
            // (null/undefined); no structure to check.
            m_out.appendTo(notCellCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecCell | SpecOther, isNotOther(value));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return;
        }
    }
3691
    // Lowers CheckStructureOrEmpty: like CheckStructure, but if abstract
    // interpretation says the value may be the empty value (all-zero bits),
    // that case skips the structure check entirely.
    void compileCheckStructureOrEmpty()
    {
        ExitKind exitKind;
        if (m_node->child1()->hasConstant())
            exitKind = BadConstantCache;
        else
            exitKind = BadCache;

        LValue cell = lowCell(m_node->child1());
        bool maySeeEmptyValue = m_interpreter.forNode(m_node->child1()).m_type & SpecEmpty;
        LBasicBlock continuation = nullptr;
        LBasicBlock lastNext = nullptr;
        if (maySeeEmptyValue) {
            // Empty values bypass the check; only emit the branch when the
            // abstract value says empty is actually possible.
            LBasicBlock notEmpty = m_out.newBlock();
            continuation = m_out.newBlock();
            m_out.branch(m_out.isZero64(cell), unsure(continuation), unsure(notEmpty));
            lastNext = m_out.appendTo(notEmpty, continuation);
        }

        checkStructure(
            m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
            exitKind, m_node->structureSet(),
            [&] (RegisteredStructure structure) {
                return weakStructureID(structure);
            });

        if (maySeeEmptyValue) {
            m_out.jump(continuation);
            m_out.appendTo(continuation, lastNext);
        }
    }
3723
    // Lowers CheckIsConstant: OSR-exit unless the child equals the node's
    // expected constant. Cells compare against the weakly-referenced cell
    // pointer; other values compare against the encoded JSValue bits.
    void compileCheckIsConstant()
    {
        if (m_node->child1().useKind() == CellUse) {
            LValue cell = lowCell(m_node->child1());

            speculate(
                BadConstantValue, jsValueValue(cell), m_node->child1().node(),
                m_out.notEqual(cell, weakPointer(m_node->cellOperand()->cell())));
        } else {
            LValue value = lowJSValue(m_node->child1());

            // Non-cell path must not be comparing against a (GC-movable) cell.
            ASSERT(!m_node->constant()->value().isCell() || !m_node->constant()->value());
            speculate(
                BadConstantValue, jsValueValue(value), m_node->child1().node(),
                m_out.notEqual(value, m_out.constInt64(JSValue::encode(m_node->constant()->value()))));
        }
    }
3741
    // Lowers CheckBadValue: this node always fails, so unconditionally
    // terminate this code path with an OSR exit.
    void compileCheckBadValue()
    {
        terminate(BadConstantValue);
    }
3746
    // Lowers CheckNotEmpty: OSR-exit with TDZFailure when the value is the
    // empty value (encoded as all-zero bits), e.g. a let/const binding read
    // before initialization.
    void compileCheckNotEmpty()
    {
        speculate(TDZFailure, noValue(), nullptr, m_out.isZero64(lowJSValue(m_node->child1())));
    }
3751
    // Lowers AssertNotEmpty: a debugging aid that is a no-op unless
    // validation is enabled. When enabled, emits a patchpoint that hits a
    // breakpoint if the value is the empty value.
    void compileAssertNotEmpty()
    {
        if (!validationEnabled())
            return;

        LValue val = lowJSValue(m_node->child1());
        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        patchpoint->appendSomeRegister(val);
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                GPRReg input = params[0].gpr();
                // Trap (breakpoint) only when the register holds the empty value.
                CCallHelpers::Jump done = jit.branchIfNotEmpty(input);
                jit.breakpoint();
                done.link(&jit);
            });
    }
3769
    // Lowers CheckIdent: OSR-exit with BadIdent unless the child's uniqued
    // string impl (from a StringIdent, or loaded out of a Symbol) matches the
    // node's expected uid.
    void compileCheckIdent()
    {
        UniquedStringImpl* uid = m_node->uidOperand();
        LValue stringImpl;
        if (m_node->child1().useKind() == StringIdentUse)
            stringImpl = lowStringIdent(m_node->child1());
        else {
            ASSERT(m_node->child1().useKind() == SymbolUse);
            stringImpl = m_out.loadPtr(lowSymbol(m_node->child1()), m_heaps.Symbol_symbolImpl);
        }
        speculate(BadIdent, noValue(), nullptr, m_out.notEqual(stringImpl, m_out.constIntPtr(uid)));
    }
3782
    // Extracts the ExecutableBase* from a JSFunction. The function's
    // executableOrRareData slot is a tagged pointer: if the rareDataTag bit is
    // clear it is the executable itself; otherwise it points (tag included) at
    // a FunctionRareData, from which we load the executable.
    LValue getExecutable(LValue function)
    {
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock hasRareData = m_out.newBlock();

        LValue rareDataTags = m_out.loadPtr(function, m_heaps.JSFunction_executableOrRareData);
        ValueFromBlock fastExecutable = m_out.anchor(rareDataTags);
        m_out.branch(m_out.testIsZeroPtr(rareDataTags, m_out.constIntPtr(JSFunction::rareDataTag)), unsure(continuation), unsure(hasRareData));

        // Strip the tag bit to recover the FunctionRareData pointer.
        LBasicBlock lastNext = m_out.appendTo(hasRareData, continuation);
        LValue rareData = m_out.sub(rareDataTags, m_out.constIntPtr(JSFunction::rareDataTag));
        ValueFromBlock slowExecutable = m_out.anchor(m_out.loadPtr(rareData, m_heaps.FunctionRareData_executable));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(pointerType(), fastExecutable, slowExecutable);
    }
3800
3801 void compileGetExecutable()
3802 {
3803 LValue cell = lowCell(m_node->child1());
3804 speculateFunction(m_node->child1(), cell);
3805 LValue executable = getExecutable(cell);
3806 setJSValue(executable);
3807 }
3808
    // Converts child1's storage to the indexing shape demanded by the node's
    // ArrayMode (Arrayify), or to an exact structure (ArrayifyToStructure).
    // Fast path: the object already has the expected shape/structure. Slow
    // path: call the VM to convert, then re-check and OSR-exit if conversion
    // did not produce the expected result.
    void compileArrayify()
    {
        LValue cell = lowCell(m_node->child1());
        LValue property = !!m_node->child2() ? lowInt32(m_node->child2()) : nullptr;

        LBasicBlock unexpectedStructure = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // For Arrayify we test the indexing shape; for ArrayifyToStructure we
        // compare against the exact target structure ID.
        auto isUnexpectedArray = [&] (LValue cell) {
            if (m_node->op() == Arrayify)
                return m_out.logicalNot(isArrayTypeForArrayify(cell, m_node->arrayMode()));

            ASSERT(m_node->op() == ArrayifyToStructure);
            return m_out.notEqual(m_out.load32(cell, m_heaps.JSCell_structureID), weakStructureID(m_node->structure()));
        };

        m_out.branch(isUnexpectedArray(cell), rarely(unexpectedStructure), usually(continuation));

        LBasicBlock lastNext = m_out.appendTo(unexpectedStructure, continuation);

        // An index at or above MIN_SPARSE_ARRAY_INDEX cannot be represented by
        // the contiguous shapes below, so exit rather than converting.
        if (property) {
            switch (m_node->arrayMode().type()) {
            case Array::Int32:
            case Array::Double:
            case Array::Contiguous:
                speculate(
                    Uncountable, noValue(), nullptr,
                    m_out.aboveOrEqual(property, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX)));
                break;
            default:
                break;
            }
        }

        // Ask the VM to convert the object's storage to the requested shape.
        switch (m_node->arrayMode().type()) {
        case Array::Int32:
            vmCall(Void, operationEnsureInt32, m_vmValue, cell);
            break;
        case Array::Double:
            vmCall(Void, operationEnsureDouble, m_vmValue, cell);
            break;
        case Array::Contiguous:
            vmCall(Void, operationEnsureContiguous, m_vmValue, cell);
            break;
        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage:
            vmCall(Void, operationEnsureArrayStorage, m_vmValue, cell);
            break;
        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            break;
        }

        // The conversion may still have failed to produce the expected shape
        // (e.g. the object was not convertible); re-check and exit if so.
        speculate(BadIndexingType, jsValueValue(cell), nullptr, isUnexpectedArray(cell));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
3867
    // Implements PutStructure: stores the new structure ID into the cell's
    // header. The transition must not change indexing mode, inline type flags,
    // or cell type — those invariants are asserted below.
    void compilePutStructure()
    {
        RegisteredStructure oldStructure = m_node->transition()->previous;
        RegisteredStructure newStructure = m_node->transition()->next;
        // Register the transition with the compilation plan.
        m_graph.m_plan.transitions().addLazily(m_node->origin.semantic.codeOriginOwner(), oldStructure.get(), newStructure.get());

        ASSERT_UNUSED(oldStructure, oldStructure->indexingMode() == newStructure->indexingMode());
        ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
        ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());

        LValue cell = lowCell(m_node->child1());

        // Property-deletion transitions attribute the store to a wider abstract
        // heap (header + named properties) — presumably so it is modeled as
        // clobbering named-property loads; confirm against AbstractHeapRepository.
        // The address written is the structure-ID slot either way.
        auto& heap = m_node->transition()->next->transitionKind() == TransitionKind::PropertyDeletion ? m_heaps.JSCellHeaderAndNamedProperties : m_heaps.JSCell_structureID;
        TypedPointer pointer { heap, m_out.addPtr(cell, m_heaps.JSCell_structureID.offset()) };

        m_out.store32(
            weakStructureID(newStructure), pointer);
    }
3886
    // Compiles GetById / TryGetById / GetByIdDirect. For a proven-cell base the
    // IC-based getById() path is used directly; for UntypedUse we branch on
    // cell-ness and fall back to the generic C++ operation for non-cells.
    void compileGetById(AccessType type)
    {
        ASSERT(type == AccessType::GetById || type == AccessType::TryGetById || type == AccessType::GetByIdDirect);
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        switch (m_node->child1().useKind()) {
        case CellUse: {
            setJSValue(getById(lowCell(m_node->child1()), type));
            return;
        }

        case UntypedUse: {
            // This is pretty weird, since we duplicate the slow path both here and in the
            // code generated by the IC. We should investigate making this less bad.
            // https://bugs.webkit.org/show_bug.cgi?id=127830
            LValue value = lowJSValue(m_node->child1());

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
            ValueFromBlock cellResult = m_out.anchor(getById(value, type));
            m_out.jump(continuation);

            // Select the generic operation matching the access type.
            auto getByIdFunction = appropriateGenericGetByIdFunction(type);

            m_out.appendTo(notCellCase, continuation);
            ValueFromBlock notCellResult = m_out.anchor(vmCall(
                Int64, getByIdFunction,
                weakPointer(globalObject), value,
                m_out.constIntPtr(m_node->cacheableIdentifier().rawBits())));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, cellResult, notCellResult));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return;
        }
    }
3933
    // GetByIdWithThis: when both base and 'this' are proven cells, go straight
    // to the IC path. Otherwise branch on cell-ness of each; if either value is
    // not a cell, fall back to the generic operation.
    void compileGetByIdWithThis()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        if (m_node->child1().useKind() == CellUse && m_node->child2().useKind() == CellUse)
            setJSValue(getByIdWithThis(lowCell(m_node->child1()), lowCell(m_node->child2())));
        else {
            LValue base = lowJSValue(m_node->child1());
            LValue thisValue = lowJSValue(m_node->child2());

            LBasicBlock baseCellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock thisValueCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isCell(base, provenType(m_node->child1())), unsure(baseCellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(baseCellCase, thisValueCellCase);

            // Base is a cell; now check 'this'.
            m_out.branch(
                isCell(thisValue, provenType(m_node->child2())), unsure(thisValueCellCase), unsure(notCellCase));

            m_out.appendTo(thisValueCellCase, notCellCase);
            ValueFromBlock cellResult = m_out.anchor(getByIdWithThis(base, thisValue));
            m_out.jump(continuation);

            m_out.appendTo(notCellCase, continuation);
            ValueFromBlock notCellResult = m_out.anchor(vmCall(
                Int64, operationGetByIdWithThisGeneric,
                weakPointer(globalObject), base, thisValue,
                m_out.constIntPtr(m_node->cacheableIdentifier().rawBits())));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, cellResult, notCellResult));
        }
    }
3972
3973 void compileGetByValWithThis()
3974 {
3975 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
3976 LValue base = lowJSValue(m_node->child1());
3977 LValue thisValue = lowJSValue(m_node->child2());
3978 LValue subscript = lowJSValue(m_node->child3());
3979
3980 LValue result = vmCall(Int64, operationGetByValWithThis, weakPointer(globalObject), base, thisValue, subscript);
3981 setJSValue(result);
3982 }
3983
    // Emits a GetPrivateName inline cache. The patchpoint hosts the IC fast
    // path; a late path performs the generic optimize-operation call (which may
    // repatch the IC). If the abstract interpreter could not prove the base is
    // a cell, a not-cell branch routes straight to the slow path.
    LValue getPrivateName(LValue base, LValue property)
    {
        Node* node = m_node;
        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        patchpoint->appendSomeRegister(property);
        patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
        patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        // With data ICs we need one scratch GPR to hold the StructureStubInfo*.
        patchpoint->numGPScratchRegisters = JITCode::useDataIC(JITType::FTLJIT) ? 1 : 0;

        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);

        // Capture these by value: the generator runs after lowering has moved on.
        State* state = &m_ftlState;
        bool baseIsCell = abstractValue(node->child1()).isType(SpecCell);
        CodeOrigin nodeSemanticOrigin = node->origin.semantic;
        patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            AllowMacroScratchRegisterUsage allowScratch(jit);

            CallSiteIndex callSiteIndex = state->jitCode->common.codeOrigins->addUniqueCallSiteIndex(nodeSemanticOrigin);

            // This is the direct exit target for operation calls.
            Box<CCallHelpers::JumpList> exceptions = exceptionHandle->scheduleExitCreation(params)->jumps(jit);

            // This is the exit for call IC's created by the IC for getters. We don't have
            // to do anything weird other than call this, since it will associate the exit with
            // the callsite index.
            exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

            GPRReg resultGPR = params[0].gpr();
            GPRReg baseGPR = params[1].gpr();
            GPRReg propertyGPR = params[2].gpr();
            GPRReg stubInfoGPR = JITCode::useDataIC(JITType::FTLJIT) ? params.gpScratch(0) : InvalidGPRReg;

            auto generator = Box<JITGetByValGenerator>::create(
                jit.codeBlock(), JITType::FTLJIT, nodeSemanticOrigin, callSiteIndex, AccessType::GetPrivateName,
                params.unavailableRegisters(), JSValueRegs(baseGPR), JSValueRegs(propertyGPR), JSValueRegs(resultGPR), stubInfoGPR);

            // Only emit a cell check if the abstract value didn't already prove it.
            CCallHelpers::Jump notCell;
            if (!baseIsCell)
                notCell = jit.branchIfNotCell(baseGPR);

            generator->generateFastPath(jit);
            CCallHelpers::Label done = jit.label();

            params.addLatePath([=] (CCallHelpers& jit) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                if (notCell.isSet())
                    notCell.link(&jit);
                if (!JITCode::useDataIC(JITType::FTLJIT))
                    generator->slowPathJump().link(&jit);
                CCallHelpers::Label slowPathBegin = jit.label();
                CCallHelpers::Call slowPathCall;
                if (JITCode::useDataIC(JITType::FTLJIT)) {
                    // Data IC: call indirectly through the stub info's slow-operation slot.
                    jit.move(CCallHelpers::TrustedImmPtr(generator->stubInfo()), stubInfoGPR);
                    generator->stubInfo()->m_slowOperation = operationGetPrivateNameOptimize;
                    slowPathCall = callOperation(
                        *state, params.unavailableRegisters(), jit, nodeSemanticOrigin,
                        exceptions.get(), CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), resultGPR,
                        jit.codeBlock()->globalObjectFor(nodeSemanticOrigin),
                        stubInfoGPR, baseGPR, propertyGPR).call();
                } else {
                    slowPathCall = callOperation(
                        *state, params.unavailableRegisters(), jit, nodeSemanticOrigin,
                        exceptions.get(), operationGetPrivateNameOptimize, resultGPR,
                        jit.codeBlock()->globalObjectFor(nodeSemanticOrigin),
                        CCallHelpers::TrustedImmPtr(generator->stubInfo()), baseGPR, propertyGPR).call();
                }
                jit.jump().linkTo(done, &jit);

                generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                    generator->finalize(linkBuffer, linkBuffer);
                });
            });
        });

        return patchpoint;
    }
4065
    // GetPrivateName with a symbol-valued key. Proven-cell bases go straight
    // through the IC in getPrivateName(); otherwise branch on cell-ness and use
    // the generic operation for non-cells.
    void compileGetPrivateName()
    {
        if (m_node->child1().useKind() == CellUse)
            setJSValue(getPrivateName(lowCell(m_node->child1()), lowSymbol(m_node->child2())));
        else {
            LValue base = lowJSValue(m_node->child1());
            LValue property = lowSymbol(m_node->child2());

            LBasicBlock baseCellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isCell(base, provenType(m_node->child1())), unsure(baseCellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(baseCellCase, notCellCase);

            ValueFromBlock cellResult = m_out.anchor(getPrivateName(base, property));
            m_out.jump(continuation);

            m_out.appendTo(notCellCase, continuation);
            JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
            // NOTE(review): constIntPtr(0) looks like an absent stub-info/IC
            // argument — confirm against operationGetPrivateName's signature.
            ValueFromBlock notCellResult = m_out.anchor(vmCall(
                Int64, operationGetPrivateName,
                weakPointer(globalObject), m_out.constIntPtr(0), base,
                property));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, cellResult, notCellResult));
        }
    }
4098
    // GetPrivateNameById: like compileGetById but with the GetPrivateName
    // access type. Cell bases use the IC; untyped bases branch on cell-ness and
    // call the generic by-id operation for non-cells.
    void compileGetPrivateNameById()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
        if (m_node->child1().useKind() == CellUse)
            setJSValue(getById(lowCell(m_node->child1()), AccessType::GetPrivateName));
        else {
            LValue base = lowJSValue(m_node->child1());

            LBasicBlock baseCellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isCell(base, provenType(m_node->child1())), unsure(baseCellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(baseCellCase, notCellCase);

            ValueFromBlock cellResult = m_out.anchor(getById(base, AccessType::GetPrivateName));
            m_out.jump(continuation);

            m_out.appendTo(notCellCase, continuation);
            ValueFromBlock notCellResult = m_out.anchor(vmCall(
                Int64, operationGetPrivateNameByIdGeneric,
                weakPointer(globalObject), base,
                m_out.constIntPtr(m_node->cacheableIdentifier().rawBits())));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, cellResult, notCellResult));
        }
    }
4130
4131 void compilePrivateBrandAccess(LValue base, LValue brand, AccessType accessType)
4132 {
4133 Node* node = m_node;
4134 PatchpointValue* patchpoint = m_out.patchpoint(Void);
4135 patchpoint->appendSomeRegister(base);
4136 patchpoint->appendSomeRegister(brand);
4137 patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
4138 patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
4139 patchpoint->clobber(RegisterSet::macroScratchRegisters());
4140 patchpoint->numGPScratchRegisters = JITCode::useDataIC(JITType::FTLJIT) ? 1 : 0;
4141
4142 RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
4143
4144 State* state = &m_ftlState;
4145 bool baseIsCell = abstractValue(m_node->child1()).isType(SpecCell);
4146 CodeOrigin nodeSemanticOrigin = node->origin.semantic;
4147 patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
4148 AllowMacroScratchRegisterUsage allowScratch(jit);
4149
4150 CallSiteIndex callSiteIndex = state->jitCode->common.codeOrigins->addUniqueCallSiteIndex(nodeSemanticOrigin);
4151
4152 // This is the direct exit target for operation calls.
4153 Box<CCallHelpers::JumpList> exceptions = exceptionHandle->scheduleExitCreation(params)->jumps(jit);
4154
4155 // This is the exit for call IC's created by the IC for getters. We don't have
4156 // to do anything weird other than call this, since it will associate the exit with
4157 // the callsite index.
4158 exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
4159
4160 GPRReg baseGPR = params[0].gpr();
4161 GPRReg brandGPR = params[1].gpr();
4162 GPRReg stubInfoGPR = JITCode::useDataIC(JITType::FTLJIT) ? params.gpScratch(0) : InvalidGPRReg;
4163
4164 auto generator = Box<JITPrivateBrandAccessGenerator>::create(
4165 jit.codeBlock(), JITType::FTLJIT, nodeSemanticOrigin, callSiteIndex, accessType,
4166 params.unavailableRegisters(), JSValueRegs(baseGPR), JSValueRegs(brandGPR), stubInfoGPR);
4167
4168 CCallHelpers::Jump notCell;
4169 if (!baseIsCell)
4170 notCell = jit.branchIfNotCell(baseGPR);
4171
4172 generator->generateFastPath(jit);
4173 CCallHelpers::Label done = jit.label();
4174
4175 params.addLatePath([=] (CCallHelpers& jit) {
4176 AllowMacroScratchRegisterUsage allowScratch(jit);
4177
4178 auto appropriatePrivateAccessFunction = [=] (AccessType type) -> decltype(&operationCheckPrivateBrandOptimize) {
4179 switch (type) {
4180 case AccessType::CheckPrivateBrand:
4181 return operationCheckPrivateBrandOptimize;
4182 case AccessType::SetPrivateBrand:
4183 return operationSetPrivateBrandOptimize;
4184 default:
4185 RELEASE_ASSERT_NOT_REACHED();
4186 return nullptr;
4187 }
4188 };
4189
4190 if (notCell.isSet())
4191 notCell.link(&jit);
4192 if (!JITCode::useDataIC(JITType::FTLJIT))
4193 generator->slowPathJump().link(&jit);
4194 CCallHelpers::Label slowPathBegin = jit.label();
4195 CCallHelpers::Call slowPathCall;
4196 if (JITCode::useDataIC(JITType::FTLJIT)) {
4197 jit.move(CCallHelpers::TrustedImmPtr(generator->stubInfo()), stubInfoGPR);
4198 generator->stubInfo()->m_slowOperation = appropriatePrivateAccessFunction(accessType);
4199 slowPathCall = callOperation(
4200 *state, params.unavailableRegisters(), jit, nodeSemanticOrigin,
4201 exceptions.get(), CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), InvalidGPRReg,
4202 jit.codeBlock()->globalObjectFor(nodeSemanticOrigin),
4203 stubInfoGPR, baseGPR, brandGPR).call();
4204 } else {
4205 slowPathCall = callOperation(
4206 *state, params.unavailableRegisters(), jit, nodeSemanticOrigin,
4207 exceptions.get(), appropriatePrivateAccessFunction(accessType), InvalidGPRReg,
4208 jit.codeBlock()->globalObjectFor(nodeSemanticOrigin),
4209 CCallHelpers::TrustedImmPtr(generator->stubInfo()), baseGPR, brandGPR).call();
4210 }
4211 jit.jump().linkTo(done, &jit);
4212
4213 generator->reportSlowPathCall(slowPathBegin, slowPathCall);
4214
4215 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
4216 generator->finalize(linkBuffer, linkBuffer);
4217 });
4218 });
4219 });
4220 }
4221
    // CheckPrivateBrand: the base is only Untyped-proven, so lower it as a JSValue.
    void compileCheckPrivateBrand()
    {
        compilePrivateBrandAccess(lowJSValue(m_node->child1()), lowSymbol(m_node->child2()), AccessType::CheckPrivateBrand);
    }
4226
    // SetPrivateBrand: the DFG guarantees the base is a cell here.
    void compileSetPrivateBrand()
    {
        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == CellUse, m_node->child1().useKind());
        compilePrivateBrandAccess(lowCell(m_node->child1()), lowSymbol(m_node->child2()), AccessType::SetPrivateBrand);
    }
4232
4233 void compilePutByIdWithThis()
4234 {
4235 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
4236 LValue base = lowJSValue(m_node->child1());
4237 LValue thisValue = lowJSValue(m_node->child2());
4238 LValue value = lowJSValue(m_node->child3());
4239
4240 vmCall(Void, m_node->ecmaMode().isStrict() ? operationPutByIdWithThisStrict : operationPutByIdWithThis,
4241 weakPointer(globalObject), base, thisValue, value, m_out.constIntPtr(m_node->cacheableIdentifier().rawBits()));
4242 }
4243
4244 void compilePutByValWithThis()
4245 {
4246 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
4247 LValue base = lowJSValue(m_graph.varArgChild(m_node, 0));
4248 LValue thisValue = lowJSValue(m_graph.varArgChild(m_node, 1));
4249 LValue property = lowJSValue(m_graph.varArgChild(m_node, 2));
4250 LValue value = lowJSValue(m_graph.varArgChild(m_node, 3));
4251
4252 vmCall(Void, m_node->ecmaMode().isStrict() ? operationPutByValWithThisStrict : operationPutByValWithThis,
4253 weakPointer(globalObject), base, thisValue, property, value);
4254 }
4255
4256 void compilePutPrivateNameById()
4257 {
4258 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == CellUse, m_node->child1().useKind());
4259
4260 LValue base = lowCell(m_node->child1());
4261 LValue value = lowJSValue(m_node->child2());
4262
4263 // We emit property check during DFG generation, so we don't need
4264 // to check it here.
4265 auto putKind = m_node->privateFieldPutKind().isDefine() ? PutKind::DirectPrivateFieldDefine : PutKind::DirectPrivateFieldSet;
4266 cachedPutById(m_node, base, value, ECMAMode::strict(), putKind);
4267 }
4268
4269 void compilePutPrivateName()
4270 {
4271 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
4272 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
4273
4274 LValue base = lowJSValue(m_node->child1());
4275 LValue property = lowSymbol(m_node->child2());
4276 LValue value = lowJSValue(m_node->child3());
4277
4278 vmCall(Void, operationPutPrivateNameGeneric,
4279 weakPointer(globalObject), base, property, value, m_out.constIntPtr(0), m_out.constInt32(m_node->privateFieldPutKind().value()));
4280 }
4281
    // Compiles the Atomics read-modify-write family (add/and/or/xor/sub,
    // exchange, compareExchange, load, store). If the storage edge is absent,
    // the array/index could not be proven usable, so everything goes through
    // generic VM operations on boxed values; otherwise we emit a real hardware
    // atomic against the typed array's storage.
    void compileAtomicsReadModifyWrite()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        TypedArrayType type = m_node->arrayMode().typedArrayType();
        unsigned numExtraArgs = numExtraAtomicsArgs(m_node->op());
        Edge baseEdge = m_graph.child(m_node, 0);
        Edge indexEdge = m_graph.child(m_node, 1);
        Edge argEdges[maxNumExtraAtomicsArgs];
        for (unsigned i = numExtraArgs; i--;)
            argEdges[i] = m_graph.child(m_node, 2 + i);
        // The storage edge comes after the extra operands, if present at all.
        Edge storageEdge = m_graph.child(m_node, 2 + numExtraArgs);

        // Fully generic path: call the operation matching this op's arity.
        if (!storageEdge) {
            auto callWith0 = [&] (auto* operation) {
                ASSERT(numExtraArgs == 0);
                return vmCall(Int64, operation, weakPointer(globalObject), lowJSValue(baseEdge), lowJSValue(indexEdge));
            };

            auto callWith1 = [&] (auto* operation) {
                ASSERT(numExtraArgs == 1);
                return vmCall(Int64, operation, weakPointer(globalObject), lowJSValue(baseEdge), lowJSValue(indexEdge), lowJSValue(argEdges[0]));
            };

            auto callWith2 = [&] (auto* operation) {
                ASSERT(numExtraArgs == 2);
                return vmCall(Int64, operation, weakPointer(globalObject), lowJSValue(baseEdge), lowJSValue(indexEdge), lowJSValue(argEdges[0]), lowJSValue(argEdges[1]));
            };

            LValue result;
            switch (m_node->op()) {
            case AtomicsAdd:
                result = callWith1(operationAtomicsAdd);
                break;
            case AtomicsAnd:
                result = callWith1(operationAtomicsAnd);
                break;
            case AtomicsCompareExchange:
                result = callWith2(operationAtomicsCompareExchange);
                break;
            case AtomicsExchange:
                result = callWith1(operationAtomicsExchange);
                break;
            case AtomicsLoad:
                result = callWith0(operationAtomicsLoad);
                break;
            case AtomicsOr:
                result = callWith1(operationAtomicsOr);
                break;
            case AtomicsStore:
                result = callWith1(operationAtomicsStore);
                break;
            case AtomicsSub:
                result = callWith1(operationAtomicsSub);
                break;
            case AtomicsXor:
                result = callWith1(operationAtomicsXor);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
            setJSValue(result);
            return;
        }

        LValue index = lowInt32(indexEdge);
        LValue args[2];
        for (unsigned i = numExtraArgs; i--;)
            args[i] = getIntTypedArrayStoreOperand(argEdges[i]);
        LValue storage = lowStorage(storageEdge);

        TypedPointer pointer = pointerIntoTypedArray(storage, index, type);
        Width width = widthForBytes(elementSize(type));

        LValue atomicValue;
        LValue result;

        // For signed sub-word element types, mask the atomic's result down to
        // the element width; presumably setIntTypedArrayLoadResult expects the
        // high bits cleared — confirm against its implementation. Unsigned and
        // 4-byte elements need no masking.
        auto sanitizeResult = [&] (LValue value) -> LValue {
            if (isSigned(type)) {
                switch (elementSize(type)) {
                case 1:
                    value = m_out.bitAnd(value, m_out.constInt32(0xff));
                    break;
                case 2:
                    value = m_out.bitAnd(value, m_out.constInt32(0xffff));
                    break;
                case 4:
                    break;
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                    break;
                }
            }
            return value;
        };

        switch (m_node->op()) {
        case AtomicsAdd:
            atomicValue = m_out.atomicXchgAdd(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsAnd:
            atomicValue = m_out.atomicXchgAnd(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsCompareExchange:
            atomicValue = m_out.atomicStrongCAS(args[0], args[1], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsExchange:
            atomicValue = m_out.atomicXchg(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsLoad:
            // An atomic load is expressed as xchgAdd of zero: a read-modify-write
            // that leaves memory unchanged and returns the current value.
            atomicValue = m_out.atomicXchgAdd(m_out.int32Zero, pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsOr:
            atomicValue = m_out.atomicXchgOr(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsStore:
            // Store's result is the value written, not the old memory contents.
            atomicValue = m_out.atomicXchg(args[0], pointer, width);
            result = args[0];
            break;
        case AtomicsSub:
            atomicValue = m_out.atomicXchgSub(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsXor:
            atomicValue = m_out.atomicXchgXor(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        // Signify that the state against which the atomic operations are serialized is confined to just
        // the typed array storage, since that's as precise of an abstraction as we can have of shared
        // array buffer storage.
        m_heaps.decorateFencedAccess(&m_heaps.typedArrayProperties, atomicValue);

        // We have to keep base alive since that keeps storage alive.
        ensureStillAliveHere(lowCell(baseEdge));

        // Atomics.store's JS-visible result is its (converted) input operand,
        // re-boxed according to the operand's use kind.
        if (m_node->op() == AtomicsStore) {
            Edge operand = argEdges[0];
            switch (operand.useKind()) {
            case Int32Use:
                setInt32(lowInt32(operand));
                break;
            case Int52RepUse:
                setStrictInt52(lowStrictInt52(operand));
                break;
            case DoubleRepUse:
                setDouble(toIntegerOrInfinity(lowDouble(operand)));
                break;
            default:
                DFG_CRASH(m_graph, m_node, "Bad result type");
                break;
            }
            return;
        }
        constexpr bool canSpeculate = false;
        setIntTypedArrayLoadResult(result, type, canSpeculate);
    }
4447
4448 void compileAtomicsIsLockFree()
4449 {
4450 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
4451 Edge child1 = m_graph.child(m_node, 0);
4452 if (child1.useKind() != Int32Use) {
4453 setJSValue(vmCall(Int64, operationAtomicsIsLockFree, weakPointer(globalObject), lowJSValue(child1)));
4454 return;
4455 }
4456
4457 LValue bytes = lowInt32(child1);
4458
4459 LBasicBlock trueCase = m_out.newBlock();
4460 LBasicBlock falseCase = m_out.newBlock();
4461 LBasicBlock continuation = m_out.newBlock();
4462
4463 LBasicBlock lastNext = m_out.insertNewBlocksBefore(trueCase);
4464
4465 Vector<SwitchCase, 4> cases;
4466 cases.append(SwitchCase(m_out.constInt32(1), trueCase, Weight()));
4467 cases.append(SwitchCase(m_out.constInt32(2), trueCase, Weight()));
4468 cases.append(SwitchCase(m_out.constInt32(4), trueCase, Weight()));
4469 cases.append(SwitchCase(m_out.constInt32(8), trueCase, Weight()));
4470 m_out.switchInstruction(bytes, cases, falseCase, Weight());
4471
4472 m_out.appendTo(trueCase, falseCase);
4473 ValueFromBlock trueValue = m_out.anchor(m_out.booleanTrue);
4474 m_out.jump(continuation);
4475 m_out.appendTo(falseCase, continuation);
4476 ValueFromBlock falseValue = m_out.anchor(m_out.booleanFalse);
4477 m_out.jump(continuation);
4478
4479 m_out.appendTo(continuation, lastNext);
4480 setBoolean(m_out.phi(Int32, trueValue, falseValue));
4481 }
4482
4483 void compileDefineDataProperty()
4484 {
4485 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
4486 LValue base = lowCell(m_graph.varArgChild(m_node, 0));
4487 LValue value = lowJSValue(m_graph.varArgChild(m_node, 2));
4488 LValue attributes = lowInt32(m_graph.varArgChild(m_node, 3));
4489 Edge& propertyEdge = m_graph.varArgChild(m_node, 1);
4490 switch (propertyEdge.useKind()) {
4491 case StringUse: {
4492 LValue property = lowString(propertyEdge);
4493 vmCall(Void, operationDefineDataPropertyString, weakPointer(globalObject), base, property, value, attributes);
4494 break;
4495 }
4496 case StringIdentUse: {
4497 LValue property = lowStringIdent(propertyEdge);
4498 vmCall(Void, operationDefineDataPropertyStringIdent, weakPointer(globalObject), base, property, value, attributes);
4499 break;
4500 }
4501 case SymbolUse: {
4502 LValue property = lowSymbol(propertyEdge);
4503 vmCall(Void, operationDefineDataPropertySymbol, weakPointer(globalObject), base, property, value, attributes);
4504 break;
4505 }
4506 case UntypedUse: {
4507 LValue property = lowJSValue(propertyEdge);
4508 vmCall(Void, operationDefineDataProperty, weakPointer(globalObject), base, property, value, attributes);
4509 break;
4510 }
4511 default:
4512 RELEASE_ASSERT_NOT_REACHED();
4513 }
4514 }
4515
4516 void compileDefineAccessorProperty()
4517 {
4518 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
4519 LValue base = lowCell(m_graph.varArgChild(m_node, 0));
4520 LValue getter = lowCell(m_graph.varArgChild(m_node, 2));
4521 LValue setter = lowCell(m_graph.varArgChild(m_node, 3));
4522 LValue attributes = lowInt32(m_graph.varArgChild(m_node, 4));
4523 Edge& propertyEdge = m_graph.varArgChild(m_node, 1);
4524 switch (propertyEdge.useKind()) {
4525 case StringUse: {
4526 LValue property = lowString(propertyEdge);
4527 vmCall(Void, operationDefineAccessorPropertyString, weakPointer(globalObject), base, property, getter, setter, attributes);
4528 break;
4529 }
4530 case StringIdentUse: {
4531 LValue property = lowStringIdent(propertyEdge);
4532 vmCall(Void, operationDefineAccessorPropertyStringIdent, weakPointer(globalObject), base, property, getter, setter, attributes);
4533 break;
4534 }
4535 case SymbolUse: {
4536 LValue property = lowSymbol(propertyEdge);
4537 vmCall(Void, operationDefineAccessorPropertySymbol, weakPointer(globalObject), base, property, getter, setter, attributes);
4538 break;
4539 }
4540 case UntypedUse: {
4541 LValue property = lowJSValue(propertyEdge);
4542 vmCall(Void, operationDefineAccessorProperty, weakPointer(globalObject), base, property, getter, setter, attributes);
4543 break;
4544 }
4545 default:
4546 RELEASE_ASSERT_NOT_REACHED();
4547 }
4548 }
4549
    // Emits a PutById inline cache. The patchpoint hosts the IC fast path; a
    // late path calls the generator's slow-path operation, which may repatch
    // the IC. ecmaMode/putKind select the exact store semantics. Exceptions
    // from the slow path and from setter-call ICs are wired through the usual
    // FTL exception-handle machinery.
    void cachedPutById(Node* node, LValue base, LValue value, ECMAMode ecmaMode, PutKind putKind)
    {
        CacheableIdentifier identifier = node->cacheableIdentifier();
        ASSERT(identifier);

        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        patchpoint->appendSomeRegister(base);
        patchpoint->appendSomeRegister(value);
        patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
        patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        // With data ICs we need one scratch GPR to hold the StructureStubInfo*.
        patchpoint->numGPScratchRegisters = JITCode::useDataIC(JITType::FTLJIT) ? 1 : 0;

        // FIXME: If this is a PutByIdFlush, we might want to late-clobber volatile registers.
        // https://bugs.webkit.org/show_bug.cgi?id=152848

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;

        // Capture by value: the generator runs after lowering has moved on.
        CodeOrigin nodeSemanticOrigin = node->origin.semantic;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.codeOrigins->addUniqueCallSiteIndex(nodeSemanticOrigin);

                // Direct exit target for the slow-path operation call.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // JS setter call ICs generated by the PutById IC will need this.
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                GPRReg stubInfoGPR = JITCode::useDataIC(JITType::FTLJIT) ? params.gpScratch(0) : InvalidGPRReg;

                // params[0] is the base, params[1] the value being stored.
                auto generator = Box<JITPutByIdGenerator>::create(
                    jit.codeBlock(), JITType::FTLJIT, nodeSemanticOrigin, callSiteIndex,
                    params.unavailableRegisters(), identifier, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), stubInfoGPR, GPRInfo::patchpointScratchRegister, ecmaMode,
                    putKind);

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall;
                        if (JITCode::useDataIC(JITType::FTLJIT)) {
                            // Data IC: call indirectly through the stub info's slow-operation slot.
                            jit.move(CCallHelpers::TrustedImmPtr(generator->stubInfo()), stubInfoGPR);
                            generator->stubInfo()->m_slowOperation = generator->slowPathFunction();
                            slowPathCall = callOperation(
                                *state, params.unavailableRegisters(), jit, nodeSemanticOrigin,
                                exceptions.get(), CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), InvalidGPRReg,
                                jit.codeBlock()->globalObjectFor(nodeSemanticOrigin),
                                stubInfoGPR, params[1].gpr(),
                                params[0].gpr(), identifier.rawBits()).call();
                        } else {
                            slowPathCall = callOperation(
                                *state, params.unavailableRegisters(), jit, nodeSemanticOrigin,
                                exceptions.get(), generator->slowPathFunction(), InvalidGPRReg,
                                jit.codeBlock()->globalObjectFor(nodeSemanticOrigin),
                                CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                                params[0].gpr(), identifier.rawBits()).call();
                        }
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });
    }
4631
4632 void compilePutById()
4633 {
4634 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == CellUse, m_node->child1().useKind());
4635
4636 Node* node = m_node;
4637 LValue base = lowCell(node->child1());
4638 LValue value = lowJSValue(node->child2());
4639 auto putKind = node->op() == PutByIdDirect ? PutKind::Direct : PutKind::NotDirect;
4640 cachedPutById(node, base, value, node->ecmaMode(), putKind);
4641 }
4642
4643 void compileGetButterfly()
4644 {
4645 LValue butterfly = m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSObject_butterfly);
4646 setStorage(butterfly);
4647 }
4648
4649 void compileConstantStoragePointer()
4650 {
4651 setStorage(m_out.constIntPtr(m_node->storagePointer()));
4652 }
4653
void compileGetIndexedPropertyStorage()
{
    // Produces the backing-store pointer used for indexed access: for strings
    // this is the character data of the StringImpl, for typed arrays it is the
    // (caged) vector pointer.
    JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
    LValue cell = lowCell(m_node->child1());

    if (m_node->arrayMode().type() == Array::String) {
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Fast path: a non-rope string already holds a pointer to its StringImpl.
        LValue fastResultValue = m_out.loadPtr(cell, m_heaps.JSString_value);
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);

        m_out.branch(isRopeString(cell, m_node->child1()), rarely(slowPath), usually(continuation));

        LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);

        // Slow path: resolve (flatten) the rope so it gains contiguous storage.
        ValueFromBlock slowResult = m_out.anchor(vmCall(pointerType(), operationResolveRope, weakPointer(globalObject), cell));

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        // Either way, the storage is the StringImpl's character data pointer.
        setStorage(m_out.loadPtr(m_out.phi(pointerType(), fastResult, slowResult), m_heaps.StringImpl_data));
        return;
    }

    DFG_ASSERT(m_graph, m_node, isTypedView(m_node->arrayMode().typedArrayType()), m_node->arrayMode().typedArrayType());
    LValue vector = m_out.loadPtr(cell, m_heaps.JSArrayBufferView_vector);
    // Typed array vectors live in the primitive Gigacage; cage before use.
    setStorage(caged(Gigacage::Primitive, vector, cell));
}
4684
4685 void compileCheckArray()
4686 {
4687 Edge edge = m_node->child1();
4688 LValue cell = lowCell(edge);
4689
4690 if (m_node->arrayMode().alreadyChecked(m_graph, m_node, abstractValue(edge)))
4691 return;
4692
4693 speculate(
4694 BadIndexingType, jsValueValue(cell), nullptr,
4695 m_out.logicalNot(isArrayTypeForCheckArray(cell, m_node->arrayMode())));
4696 }
4697
void compileCheckArrayOrEmpty()
{
    // Like CheckArray, except the input is additionally allowed to be the
    // empty value; when it is, the indexing-type check is skipped entirely.
    Edge edge = m_node->child1();
    LValue cell = lowCell(edge);

    if (m_node->arrayMode().alreadyChecked(m_graph, m_node, abstractValue(edge))) {
        // We can purge Empty check of CheckArrayOrEmpty completely in this case since CellUse only accepts SpecCell | SpecEmpty.
        ASSERT(typeFilterFor(m_node->child1().useKind()) & SpecEmpty);
        return;
    }

    // Only emit the empty-value bypass when abstract interpretation says the
    // empty value can actually flow here.
    bool maySeeEmptyValue = m_interpreter.forNode(m_node->child1()).m_type & SpecEmpty;
    LBasicBlock continuation = nullptr;
    LBasicBlock lastNext = nullptr;
    if (maySeeEmptyValue) {
        LBasicBlock notEmpty = m_out.newBlock();
        continuation = m_out.newBlock();
        // The empty value is all-zero bits; jump straight to the continuation.
        m_out.branch(m_out.isZero64(cell), unsure(continuation), unsure(notEmpty));
        lastNext = m_out.appendTo(notEmpty, continuation);
    }

    // Non-empty path: OSR-exit unless the cell has the expected indexing type.
    speculate(
        BadIndexingType, jsValueValue(cell), nullptr,
        m_out.logicalNot(isArrayTypeForCheckArray(cell, m_node->arrayMode())));

    if (maySeeEmptyValue) {
        m_out.jump(continuation);
        m_out.appendTo(continuation, lastNext);
    }
}
4728
4729 void compileCheckDetached()
4730 {
4731 Edge edge = m_node->child1();
4732 LValue cell = lowCell(edge);
4733
4734 speculate(
4735 BadIndexingType, jsValueValue(cell), edge.node(),
4736 m_out.isNull(m_out.loadPtr(cell, m_heaps.JSArrayBufferView_vector)));
4737 }
4738
void compileGetTypedArrayByteOffset()
{
    // Computes a typed array view's byte offset into its backing buffer.
    // Fast-allocated (non-wasteful) views and null-vector views report 0;
    // wasteful views compute vector - buffer data pointer.
    LValue basePtr = lowCell(m_node->child1());

    LBasicBlock wastefulCase = m_out.newBlock();
    LBasicBlock notNull = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    // Result used for both the non-wasteful and the null-vector paths.
    ValueFromBlock nullVectorOut = m_out.anchor(m_out.constIntPtr(0));

    LValue mode = m_out.load32(basePtr, m_heaps.JSArrayBufferView_mode);
    m_out.branch(
        m_out.notEqual(mode, m_out.constInt32(WastefulTypedArray)),
        unsure(continuation), unsure(wastefulCase));

    LBasicBlock lastNext = m_out.appendTo(wastefulCase, notNull);

    LValue vector = m_out.loadPtr(basePtr, m_heaps.JSArrayBufferView_vector);
    m_out.branch(m_out.equal(vector, m_out.constIntPtr(JSArrayBufferView::nullVectorPtr())),
        unsure(continuation), unsure(notNull));

    m_out.appendTo(notNull, continuation);

    // Walk butterfly -> ArrayBuffer; the butterfly lives in the JSValue Gigacage.
    LValue butterflyPtr = caged(Gigacage::JSValue, m_out.loadPtr(basePtr, m_heaps.JSObject_butterfly), basePtr);
    LValue arrayBufferPtr = m_out.loadPtr(butterflyPtr, m_heaps.Butterfly_arrayBuffer);

    LValue vectorPtr = caged(Gigacage::Primitive, vector, basePtr);

    // FIXME: This needs caging.
    // https://bugs.webkit.org/show_bug.cgi?id=175515
    LValue dataPtr = m_out.loadPtr(arrayBufferPtr, m_heaps.ArrayBuffer_data);
    dataPtr = removeArrayPtrTag(dataPtr);

    // Byte offset = view's vector pointer minus the buffer's data pointer.
    ValueFromBlock wastefulOut = m_out.anchor(m_out.sub(vectorPtr, dataPtr));

    m_out.jump(continuation);
    m_out.appendTo(continuation, lastNext);

    setInt32(m_out.castToInt32(m_out.phi(pointerType(), nullVectorOut, wastefulOut)));
}
4779
void compileGetPrototypeOf()
{
    // Lowers GetPrototypeOf. Structure of the lowering:
    //  - For proven array/function/final-object cells, read the prototype
    //    straight off the Structure, constant-folding to mono- or poly-proto
    //    loads when abstract interpretation pins down the structure set.
    //  - For ObjectUse / untyped inputs, fall back to a runtime call whenever
    //    the structure overrides getPrototype out of line (or the input is not
    //    an object at all).
    JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

    LValue object = nullptr;
    LValue structure = nullptr;
    ValueFromBlock slowResult;

    LBasicBlock loadPolyProto = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();
    LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);

    switch (m_node->child1().useKind()) {
    case ArrayUse:
    case FunctionUse:
    case FinalObjectUse: {
        object = lowCell(m_node->child1());
        switch (m_node->child1().useKind()) {
        case ArrayUse:
            speculateArray(m_node->child1(), object);
            break;
        case FunctionUse:
            speculateFunction(m_node->child1(), object);
            break;
        case FinalObjectUse:
            speculateFinalObject(m_node->child1(), object);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }

        structure = loadStructure(object);

        // If the structure set is finite and all-object, try to prove every
        // structure agrees on mono- vs poly-proto so we can emit a single load.
        AbstractValue& value = m_state.forNode(m_node->child1());
        if ((value.m_type && !(value.m_type & ~SpecObject)) && value.m_structure.isFinite()) {
            bool hasPolyProto = false;
            bool hasMonoProto = false;
            value.m_structure.forEach([&] (RegisteredStructure structure) {
                if (structure->hasPolyProto())
                    hasPolyProto = true;
                else
                    hasMonoProto = true;
            });

            if (hasMonoProto && !hasPolyProto) {
                // All mono-proto: prototype is stored directly on the Structure.
                setJSValue(m_out.load64(structure, m_heaps.Structure_prototype));
                return;
            }

            if (hasPolyProto && !hasMonoProto) {
                // All poly-proto: prototype lives at a known inline-storage offset.
                setJSValue(m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), object, m_out.constInt64(knownPolyProtoOffset), ScaleEight, JSObject::offsetOfInlineStorage())));
                return;
            }
        }

        break;
    }
    case ObjectUse: {
        object = lowObject(m_node->child1());

        LBasicBlock fastPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();

        // Fast path is only valid when the structure does not override
        // getPrototype out of line.
        structure = loadStructure(object);
        m_out.branch(
            m_out.testIsZero32(
                m_out.load16ZeroExt32(structure, m_heaps.Structure_outOfLineTypeFlags),
                m_out.constInt32(OverridesGetPrototypeOutOfLine)),
            usually(fastPath), rarely(slowPath));

        m_out.appendTo(slowPath, fastPath);
        slowResult = m_out.anchor(vmCall(Int64, operationGetPrototypeOfObject, weakPointer(globalObject), object));
        m_out.jump(continuation);

        m_out.appendTo(fastPath, loadPolyProto);
        break;
    }
    default: {
        // Untyped input: check cell-ness and object-ness dynamically before
        // attempting the structure fast path.
        object = lowJSValue(m_node->child1());
        SpeculatedType valueType = provenType(m_node->child1());

        LBasicBlock isCellPath = m_out.newBlock();
        LBasicBlock isObjectPath = m_out.newBlock();
        LBasicBlock fastPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();

        m_out.branch(isCell(object, valueType), usually(isCellPath), rarely(slowPath));
        m_out.appendTo(isCellPath, isObjectPath);
        m_out.branch(isObject(object, valueType), usually(isObjectPath), rarely(slowPath));

        m_out.appendTo(isObjectPath, slowPath);
        structure = loadStructure(object);
        m_out.branch(
            m_out.testIsZero32(
                m_out.load16ZeroExt32(structure, m_heaps.Structure_outOfLineTypeFlags),
                m_out.constInt32(OverridesGetPrototypeOutOfLine)),
            usually(fastPath), rarely(slowPath));

        m_out.appendTo(slowPath, fastPath);
        slowResult = m_out.anchor(vmCall(Int64, operationGetPrototypeOf, weakPointer(globalObject), object));
        m_out.jump(continuation);

        m_out.appendTo(fastPath, loadPolyProto);
        break;
    }
    }

    ASSERT(object);
    ASSERT(structure);

    // Shared tail: a zero prototype slot on the Structure signals poly-proto,
    // in which case the prototype is read from the object's inline storage.
    LValue prototypeBits = m_out.load64(structure, m_heaps.Structure_prototype);
    ValueFromBlock monoProto = m_out.anchor(prototypeBits);
    m_out.branch(m_out.isZero64(prototypeBits), unsure(loadPolyProto), unsure(continuation));

    m_out.appendTo(loadPolyProto, continuation);
    ValueFromBlock polyProto = m_out.anchor(
        m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), object, m_out.constInt64(knownPolyProtoOffset), ScaleEight, JSObject::offsetOfInlineStorage())));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    setJSValue(m_out.phi(Int64, monoProto, polyProto, slowResult));
}
4903
void compileGetArrayLength()
{
    // Lowers GetArrayLength for every array shape this node can be planted
    // with; each case reads the length from its shape-specific location.
    switch (m_node->arrayMode().type()) {
    case Array::Undecided:
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous: {
        // Butterfly-backed shapes: length is the butterfly's public length.
        setInt32(m_out.load32NonNegative(lowStorage(m_node->child2()), m_heaps.Butterfly_publicLength));
        return;
    }

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        // ArrayStorage public length may be negative (sentinel); exit if so.
        LValue length = m_out.load32(lowStorage(m_node->child2()), m_heaps.ArrayStorage_publicLength);
        speculate(Uncountable, noValue(), nullptr, m_out.lessThan(length, m_out.int32Zero));
        setInt32(length);
        return;
    }

    case Array::String: {
        LValue string = lowCell(m_node->child1());

        LBasicBlock ropePath = m_out.newBlock();
        LBasicBlock nonRopePath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isRopeString(string, m_node->child1()), rarely(ropePath), usually(nonRopePath));

        // Ropes carry their length on the JSRopeString itself ...
        LBasicBlock lastNext = m_out.appendTo(ropePath, nonRopePath);
        ValueFromBlock ropeLength = m_out.anchor(m_out.load32NonNegative(string, m_heaps.JSRopeString_length));
        m_out.jump(continuation);

        // ... while resolved strings read it off their StringImpl.
        m_out.appendTo(nonRopePath, continuation);
        ValueFromBlock nonRopeLength = m_out.anchor(m_out.load32NonNegative(m_out.loadPtr(string, m_heaps.JSString_value), m_heaps.StringImpl_length));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setInt32(m_out.phi(Int32, ropeLength, nonRopeLength));
        return;
    }

    case Array::DirectArguments: {
        // Fast length only valid if no arguments were mapped (aliased).
        LValue arguments = lowCell(m_node->child1());
        speculate(
            ExoticObjectMode, noValue(), nullptr,
            m_out.notNull(m_out.loadPtr(arguments, m_heaps.DirectArguments_mappedArguments)));
        setInt32(m_out.load32NonNegative(arguments, m_heaps.DirectArguments_length));
        return;
    }

    case Array::ScopedArguments: {
        // Fast length only valid if length/callee were never overridden.
        LValue arguments = lowCell(m_node->child1());
        speculate(
            ExoticObjectMode, noValue(), nullptr,
            m_out.notZero32(m_out.load8ZeroExt32(arguments, m_heaps.ScopedArguments_overrodeThings)));
        setInt32(m_out.load32NonNegative(arguments, m_heaps.ScopedArguments_totalLength));
        return;
    }

    default:
        if (m_node->arrayMode().isSomeTypedArrayView()) {
            setInt32(
                m_out.load32NonNegative(lowCell(m_node->child1()), m_heaps.JSArrayBufferView_length));
            return;
        }

        DFG_CRASH(m_graph, m_node, "Bad array type");
        return;
    }
}
4974
4975 void compileGetVectorLength()
4976 {
4977 switch (m_node->arrayMode().type()) {
4978 case Array::ArrayStorage:
4979 case Array::SlowPutArrayStorage:
4980 setInt32(m_out.load32NonNegative(lowStorage(m_node->child2()), m_heaps.ArrayStorage_vectorLength));
4981 return;
4982 default:
4983 return;
4984 }
4985 }
4986
4987 void compileAssertInBounds()
4988 {
4989 ASSERT(Options::validateBoundsCheckElimination());
4990 LValue index = lowInt32(m_node->child1());
4991 LValue bounds = lowInt32(m_node->child2());
4992
4993 LBasicBlock outOfBoundsCase = m_out.newBlock();
4994 LBasicBlock continuation = m_out.newBlock();
4995 m_out.branch(m_out.below(index, bounds), usually(continuation), rarely(outOfBoundsCase));
4996
4997 LBasicBlock lastNext = m_out.appendTo(outOfBoundsCase, continuation);
4998 vmCall(Void, operationReportBoundsCheckEliminationErrorAndCrash,
4999 m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())),
5000 m_out.constInt32(m_node->index()),
5001 m_out.constInt32(m_node->child1()->index()),
5002 m_out.constInt32(m_node->child2()->index()),
5003 index, bounds);
5004 m_out.unreachable();
5005
5006 m_out.appendTo(continuation, lastNext);
5007 }
5008
5009 void compileCheckInBounds()
5010 {
5011 speculate(
5012 OutOfBounds, noValue(), nullptr,
5013 m_out.aboveOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
5014
5015 // Even though we claim to have JSValue result, no user of us should
5016 // depend on our value. Users of this node just need to maintain that
5017 // we dominate them.
5018 }
5019
void compileGetByVal()
{
    // Lowers GetByVal across all array modes: fast butterfly/typed-array loads
    // with hole/bounds speculation, exotic-object paths for arguments objects,
    // and a generic patchpoint-based inline cache for untyped accesses.
    JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
    switch (m_node->arrayMode().type()) {
    case Array::Int32:
    case Array::Contiguous: {
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

        IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
            m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;

        LValue base = lowCell(m_graph.varArgChild(m_node, 0));

        if (m_node->arrayMode().isInBounds()) {
            LValue result = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
            // A zero-encoded slot is a hole. Sane-chain mode folds holes to
            // undefined; otherwise holes trigger an OSR exit.
            LValue isHole = m_out.isZero64(result);
            if (m_node->arrayMode().isInBoundsSaneChain()) {
                DFG_ASSERT(
                    m_graph, m_node, m_node->arrayMode().type() == Array::Contiguous, m_node->arrayMode().type());
                result = m_out.select(
                    isHole, m_out.constInt64(JSValue::encode(jsUndefined())), result);
            } else
                speculate(LoadFromHole, noValue(), nullptr, isHole);
            // We have to keep base alive to keep content in storage alive.
            if (m_node->arrayMode().type() == Array::Contiguous)
                ensureStillAliveHere(base);
            setJSValue(result);
            return;
        }

        // Possibly-out-of-bounds: branch to a slow case on OOB or hole.
        LBasicBlock fastCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            m_out.aboveOrEqual(
                index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
            rarely(slowCase), usually(fastCase));

        LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);

        LValue fastResultValue = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);
        m_out.branch(
            m_out.isZero64(fastResultValue), rarely(slowCase), usually(continuation));

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult;
        if (m_node->arrayMode().isOutOfBoundsSaneChain()) {
            // Sane-chain OOB reads yield undefined; negative indices still exit.
            speculate(NegativeIndex, noValue(), nullptr, m_out.lessThan(index, m_out.int32Zero));
            slowResult = m_out.anchor(m_out.constInt64(JSValue::ValueUndefined));
        } else
            slowResult = m_out.anchor(vmCall(Int64, operationGetByValObjectInt, weakPointer(globalObject), base, index));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        // We have to keep base alive to keep content in storage alive.
        if (m_node->arrayMode().type() == Array::Contiguous)
            ensureStillAliveHere(base);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
        return;
    }

    case Array::Double: {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

        IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;

        if (m_node->arrayMode().isInBounds()) {
            LValue result = m_out.loadDouble(
                baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));

            // In a double butterfly, NaN encodes a hole; exit unless sane chain.
            if (!m_node->arrayMode().isInBoundsSaneChain()) {
                speculate(
                    LoadFromHole, noValue(), nullptr,
                    m_out.doubleNotEqualOrUnordered(result, result));
            }
            setDouble(result);
            break;
        }

        // If no user needs a JSValue, the sane-chain OOB result can stay as an
        // unboxed double (PNaN) instead of boxed undefined.
        bool resultIsUnboxed = m_node->arrayMode().isOutOfBoundsSaneChain() && !(m_node->flags() & NodeBytecodeUsesAsOther);

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock boxPath = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            m_out.aboveOrEqual(
                index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
            rarely(slowCase), usually(inBounds));

        LBasicBlock lastNext = m_out.appendTo(inBounds, boxPath);
        LValue doubleValue = m_out.loadDouble(
            baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
        m_out.branch(
            m_out.doubleNotEqualOrUnordered(doubleValue, doubleValue),
            rarely(slowCase), usually(boxPath));

        m_out.appendTo(boxPath, slowCase);
        ValueFromBlock fastResult = m_out.anchor(resultIsUnboxed ? doubleValue : boxDouble(doubleValue));
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult;
        if (m_node->arrayMode().isOutOfBoundsSaneChain()) {
            speculate(NegativeIndex, noValue(), nullptr, m_out.lessThan(index, m_out.int32Zero));
            if (resultIsUnboxed)
                slowResult = m_out.anchor(m_out.constDouble(PNaN));
            else
                slowResult = m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined())));
        } else
            slowResult = m_out.anchor(vmCall(Int64, operationGetByValObjectInt, weakPointer(globalObject), base, index));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        if (resultIsUnboxed)
            setDouble(m_out.phi(Double, fastResult, slowResult));
        else
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
        return;
    }

    case Array::Undecided: {
        // An undecided array has no storage; any non-negative access yields
        // undefined, negative indices OSR-exit.
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

        speculate(OutOfBounds, noValue(), m_node, m_out.lessThan(index, m_out.int32Zero));
        setJSValue(m_out.constInt64(JSValue::ValueUndefined));
        return;
    }

    case Array::DirectArguments: {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

        // Fast access only valid when no arguments were mapped (aliased).
        speculate(
            ExoticObjectMode, noValue(), nullptr,
            m_out.notNull(m_out.loadPtr(base, m_heaps.DirectArguments_mappedArguments)));

        LValue length = m_out.load32NonNegative(base, m_heaps.DirectArguments_length);
        auto isOutOfBounds = m_out.aboveOrEqual(index, length);
        if (m_node->arrayMode().isInBounds()) {
            speculate(OutOfBounds, noValue(), nullptr, isOutOfBounds);
            TypedPointer address = m_out.baseIndex(
                m_heaps.DirectArguments_storage, base, m_out.zeroExtPtr(index));
            setJSValue(m_out.load64(address));
            return;
        }

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isOutOfBounds, rarely(slowCase), usually(inBounds));

        LBasicBlock lastNext = m_out.appendTo(inBounds, slowCase);
        TypedPointer address = m_out.baseIndex(
            m_heaps.DirectArguments_storage,
            base,
            m_out.zeroExt(index, pointerType()));
        ValueFromBlock fastResult = m_out.anchor(m_out.load64(address));
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, operationGetByValObjectInt, weakPointer(globalObject), base, index));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
        return;
    }

    case Array::ScopedArguments: {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

        speculate(
            ExoticObjectMode, noValue(), nullptr,
            m_out.aboveOrEqual(
                index,
                m_out.load32NonNegative(base, m_heaps.ScopedArguments_totalLength)));

        LValue table = m_out.loadPtr(base, m_heaps.ScopedArguments_table);
        LValue namedLength = m_out.load32(table, m_heaps.ScopedArgumentsTable_length);

        // Named arguments resolve through the scope; overflow arguments live
        // in the ScopedArguments object's own storage.
        LBasicBlock namedCase = m_out.newBlock();
        LBasicBlock overflowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            m_out.aboveOrEqual(index, namedLength), unsure(overflowCase), unsure(namedCase));

        LBasicBlock lastNext = m_out.appendTo(namedCase, overflowCase);

        LValue scope = m_out.loadPtr(base, m_heaps.ScopedArguments_scope);
        LValue arguments = m_out.loadPtr(table, m_heaps.ScopedArgumentsTable_arguments);

        TypedPointer address = m_out.baseIndex(
            m_heaps.scopedArgumentsTableArguments, arguments, m_out.zeroExtPtr(index));
        LValue scopeOffset = m_out.load32(address);

        speculate(
            ExoticObjectMode, noValue(), nullptr,
            m_out.equal(scopeOffset, m_out.constInt32(ScopeOffset::invalidOffset)));

        address = m_out.baseIndex(
            m_heaps.JSLexicalEnvironment_variables, scope, m_out.zeroExtPtr(scopeOffset));
        ValueFromBlock namedResult = m_out.anchor(m_out.load64(address));
        m_out.jump(continuation);

        m_out.appendTo(overflowCase, continuation);

        LValue storage = m_out.loadPtr(base, m_heaps.ScopedArguments_storage);
        address = m_out.baseIndex(
            m_heaps.ScopedArguments_Storage_storage, storage,
            m_out.zeroExtPtr(m_out.sub(index, namedLength)));
        LValue overflowValue = m_out.load64(address);
        // Zero-encoded slot means the argument was deleted/absent; exit.
        speculate(ExoticObjectMode, noValue(), nullptr, m_out.isZero64(overflowValue));
        ValueFromBlock overflowResult = m_out.anchor(overflowValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, namedResult, overflowResult));
        return;
    }

    case Array::BigInt64Array:
    case Array::BigUint64Array:
    case Array::Generic: {
        // Fully-generic access. If profiling says the IC never helps, go
        // straight to a call; otherwise emit a patchpoint-hosted GetByVal IC.
        if (m_graph.m_slowGetByVal.contains(m_node)) {
            if (m_graph.varArgChild(m_node, 0).useKind() == ObjectUse) {
                if (m_graph.varArgChild(m_node, 1).useKind() == StringUse) {
                    setJSValue(vmCall(
                        Int64, operationGetByValObjectString, weakPointer(globalObject),
                        lowObject(m_graph.varArgChild(m_node, 0)), lowString(m_graph.varArgChild(m_node, 1))));
                    return;
                }

                if (m_graph.varArgChild(m_node, 1).useKind() == SymbolUse) {
                    setJSValue(vmCall(
                        Int64, operationGetByValObjectSymbol, weakPointer(globalObject),
                        lowObject(m_graph.varArgChild(m_node, 0)), lowSymbol(m_graph.varArgChild(m_node, 1))));
                    return;
                }
            }

            setJSValue(vmCall(
                Int64, operationGetByVal, weakPointer(globalObject),
                lowJSValue(m_graph.varArgChild(m_node, 0)), lowJSValue(m_graph.varArgChild(m_node, 1))));
            return;
        }

        Node* node = m_node;

        LValue base = lowJSValue(m_graph.varArgChild(node, 0), ManualOperandSpeculation);
        LValue property = lowJSValue(m_graph.varArgChild(node, 1), ManualOperandSpeculation);

        speculate(m_graph.varArgChild(node, 0));
        speculate(m_graph.varArgChild(node, 1));
        // Record proven facts so the IC can skip redundant dynamic checks.
        bool baseIsCell = abstractValue(m_graph.varArgChild(node, 0)).isType(SpecCell);
        bool propertyIsString = false;
        bool propertyIsInt32 = false;
        bool propertyIsSymbol = false;
        if (abstractValue(m_graph.varArgChild(node, 1)).isType(SpecString))
            propertyIsString = true;
        else if (abstractValue(m_graph.varArgChild(node, 1)).isType(SpecInt32Only))
            propertyIsInt32 = true;
        else if (abstractValue(m_graph.varArgChild(node, 1)).isType(SpecSymbol))
            propertyIsSymbol = true;

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        patchpoint->appendSomeRegister(property);
        patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
        patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        // Data ICs need one scratch GPR to hold the StructureStubInfo pointer.
        patchpoint->numGPScratchRegisters = JITCode::useDataIC(JITType::FTLJIT) ? 1 : 0;

        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        CodeOrigin nodeSemanticOrigin = node->origin.semantic;
        patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            AllowMacroScratchRegisterUsage allowScratch(jit);

            CallSiteIndex callSiteIndex = state->jitCode->common.codeOrigins->addUniqueCallSiteIndex(nodeSemanticOrigin);

            // This is the direct exit target for operation calls.
            Box<CCallHelpers::JumpList> exceptions = exceptionHandle->scheduleExitCreation(params)->jumps(jit);

            // This is the exit for call IC's created by the IC for getters. We don't have
            // to do anything weird other than call this, since it will associate the exit with
            // the callsite index.
            exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

            GPRReg resultGPR = params[0].gpr();
            GPRReg baseGPR = params[1].gpr();
            GPRReg propertyGPR = params[2].gpr();
            GPRReg stubInfoGPR = JITCode::useDataIC(JITType::FTLJIT) ? params.gpScratch(0) : InvalidGPRReg;

            auto generator = Box<JITGetByValGenerator>::create(
                jit.codeBlock(), JITType::FTLJIT, nodeSemanticOrigin, callSiteIndex, AccessType::GetByVal,
                params.unavailableRegisters(), JSValueRegs(baseGPR), JSValueRegs(propertyGPR), JSValueRegs(resultGPR), stubInfoGPR);

            generator->stubInfo()->propertyIsString = propertyIsString;
            generator->stubInfo()->propertyIsInt32 = propertyIsInt32;
            generator->stubInfo()->propertyIsSymbol = propertyIsSymbol;

            // Only emit the cell check if abstract interpretation couldn't prove it.
            CCallHelpers::Jump notCell;
            if (!baseIsCell)
                notCell = jit.branchIfNotCell(baseGPR);

            generator->generateFastPath(jit);
            CCallHelpers::Label done = jit.label();

            params.addLatePath([=] (CCallHelpers& jit) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                if (notCell.isSet())
                    notCell.link(&jit);
                if (!JITCode::useDataIC(JITType::FTLJIT))
                    generator->slowPathJump().link(&jit);
                CCallHelpers::Label slowPathBegin = jit.label();
                CCallHelpers::Call slowPathCall;
                if (JITCode::useDataIC(JITType::FTLJIT)) {
                    // Data IC: the slow operation is dispatched through the stub info.
                    jit.move(CCallHelpers::TrustedImmPtr(generator->stubInfo()), stubInfoGPR);
                    generator->stubInfo()->m_slowOperation = operationGetByValOptimize;
                    slowPathCall = callOperation(
                        *state, params.unavailableRegisters(), jit, nodeSemanticOrigin,
                        exceptions.get(), CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), resultGPR,
                        jit.codeBlock()->globalObjectFor(nodeSemanticOrigin),
                        stubInfoGPR, CCallHelpers::TrustedImmPtr(nullptr), baseGPR, propertyGPR).call();
                } else {
                    slowPathCall = callOperation(
                        *state, params.unavailableRegisters(), jit, nodeSemanticOrigin,
                        exceptions.get(), operationGetByValOptimize, resultGPR,
                        jit.codeBlock()->globalObjectFor(nodeSemanticOrigin),
                        CCallHelpers::TrustedImmPtr(generator->stubInfo()), CCallHelpers::TrustedImmPtr(nullptr), baseGPR, propertyGPR).call();
                }
                jit.jump().linkTo(done, &jit);

                generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                    generator->finalize(linkBuffer, linkBuffer);
                });
            });
        });

        setJSValue(patchpoint);
        return;
    }

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

        IndexedAbstractHeap& heap = m_heaps.ArrayStorage_vector;

        if (m_node->arrayMode().isInBounds()) {
            LValue result = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
            speculate(LoadFromHole, noValue(), nullptr, m_out.isZero64(result));
            // We have to keep base alive to keep content in storage alive.
            ensureStillAliveHere(base);
            setJSValue(result);
            return;
        }

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // ArrayStorage bounds are checked against the vector length.
        m_out.branch(
            m_out.aboveOrEqual(index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_vectorLength)),
            rarely(slowCase), usually(inBounds));

        LBasicBlock lastNext = m_out.appendTo(inBounds, slowCase);
        LValue result = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
        ValueFromBlock fastResult = m_out.anchor(result);
        m_out.branch(
            m_out.isZero64(result),
            rarely(slowCase), usually(continuation));

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int64, operationGetByValObjectInt, weakPointer(globalObject), base, index));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        // We have to keep base alive to keep content in storage alive.
        ensureStillAliveHere(base);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
        return;
    }

    case Array::String: {
        compileStringCharAt();
        return;
    }

    case Array::Int8Array:
    case Array::Int16Array:
    case Array::Int32Array:
    case Array::Uint8Array:
    case Array::Uint8ClampedArray:
    case Array::Uint16Array:
    case Array::Uint32Array:
    case Array::Float32Array:
    case Array::Float64Array: {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

        TypedArrayType type = m_node->arrayMode().typedArrayType();
        ASSERT(isTypedView(type));
        {
            TypedPointer pointer = pointerIntoTypedArray(storage, index, type);

            if (isInt(type)) {
                LValue result = loadFromIntTypedArray(pointer, type);
                // We have to keep base alive since that keeps storage alive.
                ensureStillAliveHere(base);
                constexpr bool canSpeculate = true;
                setIntTypedArrayLoadResult(result, type, canSpeculate);
                return;
            }

            ASSERT(isFloat(type));

            LValue result;
            switch (type) {
            case TypeFloat32:
                // Widen to double; all FTL double math is on 64-bit floats.
                result = m_out.floatToDouble(m_out.loadFloat(pointer));
                break;
            case TypeFloat64:
                result = m_out.loadDouble(pointer);
                break;
            default:
                DFG_CRASH(m_graph, m_node, "Bad typed array type");
            }

            // We have to keep base alive since that keeps storage alive.
            ensureStillAliveHere(base);
            setDouble(result);
            return;
        }
    }

    case Array::AnyTypedArray:
    case Array::ForceExit:
    case Array::SelectUsingArguments:
    case Array::SelectUsingPredictions:
    case Array::Unprofiled:
        DFG_CRASH(m_graph, m_node, "Bad array type");
        return;
    }
}
5483
void compileGetMyArgumentByVal()
{
    // Loads an argument of the current (possibly inlined) call frame by a
    // dynamic index. Handles both GetMyArgumentByVal (deopts when out of
    // bounds) and GetMyArgumentByValOutOfBounds (yields undefined instead).
    InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame();

    LValue originalIndex = lowInt32(m_node->child2());

    // Argument count: a compile-time constant for non-varargs inline frames,
    // otherwise loaded from the frame's argument-count slot.
    LValue numberOfArgsIncludingThis;
    if (inlineCallFrame && !inlineCallFrame->isVarargs())
        numberOfArgsIncludingThis = m_out.constInt32(inlineCallFrame->argumentCountIncludingThis);
    else {
        VirtualRegister argumentCountRegister = AssemblyHelpers::argumentCount(inlineCallFrame);
        numberOfArgsIncludingThis = m_out.load32(payloadFor(argumentCountRegister));
    }

    LValue numberOfArgs = m_out.sub(numberOfArgsIncludingThis, m_out.int32One);
    LValue indexToCheck = originalIndex;
    LValue numberOfArgumentsToSkip = m_out.int32Zero;
    if (m_node->numberOfArgumentsToSkip()) {
        // Fold the skip count into the index up front, deoptimizing if the
        // addition overflows int32.
        numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip());
        CheckValue* check = m_out.speculateAdd(indexToCheck, numberOfArgumentsToSkip);
        blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
        indexToCheck = check;
    }

    // Out of bounds if the adjusted index is past the last argument, or below
    // the skip count (the unsigned below() also catches negative original
    // indices).
    LValue isOutOfBounds = m_out.bitOr(m_out.aboveOrEqual(indexToCheck, numberOfArgs), m_out.below(indexToCheck, numberOfArgumentsToSkip));
    LBasicBlock continuation = nullptr;
    LBasicBlock lastNext = nullptr;
    ValueFromBlock slowResult;
    if (m_node->op() == GetMyArgumentByValOutOfBounds) {
        // Out-of-bounds variant: branch to the continuation with undefined
        // rather than deoptimizing.
        LBasicBlock normalCase = m_out.newBlock();
        continuation = m_out.newBlock();

        slowResult = m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined())));
        m_out.branch(isOutOfBounds, unsure(continuation), unsure(normalCase));

        lastNext = m_out.appendTo(normalCase, continuation);
    } else
        speculate(OutOfBounds, noValue(), nullptr, isOutOfBounds);

    // +1 steps past the |this| slot: the base computed below points at
    // argument 0 including |this|.
    LValue index = m_out.add(indexToCheck, m_out.int32One);

    TypedPointer base;
    if (inlineCallFrame) {
        // If the inline frame only ever receives |this| (count == 1), base
        // stays null and we produce undefined below.
        if (inlineCallFrame->argumentCountIncludingThis > 1)
            base = addressFor(inlineCallFrame->m_argumentsWithFixup[0].virtualRegister());
    } else
        base = addressFor(virtualRegisterForArgumentIncludingThis(0));

    LValue result;
    if (base) {
        LValue pointer = m_out.baseIndex(
            base.value(), m_out.zeroExt(index, pointerType()), ScaleEight);
        result = m_out.load64(TypedPointer(m_heaps.variables.atAnyIndex(), pointer));
    } else
        result = m_out.constInt64(JSValue::encode(jsUndefined()));

    if (m_node->op() == GetMyArgumentByValOutOfBounds) {
        // Merge the in-bounds load with the anchored undefined value.
        ValueFromBlock normalResult = m_out.anchor(result);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        result = m_out.phi(Int64, slowResult, normalResult);
    }

    setJSValue(result);
}
5550
void compilePutByVal()
{
    // Lowers PutByVal / PutByValDirect / PutByValAlias. Var-arg children:
    // [0] base, [1] property/index, [2] value, [3] storage, [4] typed-array
    // length (only consumed on the typed-array out-of-bounds path below).
    JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
    Edge child1 = m_graph.varArgChild(m_node, 0); // base
    Edge child2 = m_graph.varArgChild(m_node, 1); // property / index
    Edge child3 = m_graph.varArgChild(m_node, 2); // value to store
    Edge child4 = m_graph.varArgChild(m_node, 3); // storage
    Edge child5 = m_graph.varArgChild(m_node, 4); // typed-array length

    ArrayMode arrayMode = m_node->arrayMode().modeForPut();
    switch (arrayMode.type()) {
    case Array::BigInt64Array:
    case Array::BigUint64Array:
    case Array::Generic: {
        // Fully generic path: everything goes through a runtime call. The
        // operation is selected by op kind (direct vs. ordinary put), strict
        // mode, and how precisely the property's type is known.
        if (child1.useKind() == CellUse) {
            V_JITOperation_GCCJ operation = nullptr;
            if (child2.useKind() == StringUse) {
                if (m_node->op() == PutByValDirect) {
                    if (m_node->ecmaMode().isStrict())
                        operation = operationPutByValDirectCellStringStrict;
                    else
                        operation = operationPutByValDirectCellStringNonStrict;
                } else {
                    if (m_node->ecmaMode().isStrict())
                        operation = operationPutByValCellStringStrict;
                    else
                        operation = operationPutByValCellStringNonStrict;
                }
                vmCall(Void, operation, weakPointer(globalObject), lowCell(child1), lowString(child2), lowJSValue(child3));
                return;
            }

            if (child2.useKind() == SymbolUse) {
                if (m_node->op() == PutByValDirect) {
                    if (m_node->ecmaMode().isStrict())
                        operation = operationPutByValDirectCellSymbolStrict;
                    else
                        operation = operationPutByValDirectCellSymbolNonStrict;
                } else {
                    if (m_node->ecmaMode().isStrict())
                        operation = operationPutByValCellSymbolStrict;
                    else
                        operation = operationPutByValCellSymbolNonStrict;
                }
                vmCall(Void, operation, weakPointer(globalObject), lowCell(child1), lowSymbol(child2), lowJSValue(child3));
                return;
            }
        }

        // Fallback: base and property are arbitrary JSValues.
        V_JITOperation_GJJJ operation;
        if (m_node->op() == PutByValDirect) {
            if (m_node->ecmaMode().isStrict())
                operation = operationPutByValDirectStrict;
            else
                operation = operationPutByValDirectNonStrict;
        } else {
            if (m_node->ecmaMode().isStrict())
                operation = operationPutByValStrict;
            else
                operation = operationPutByValNonStrict;
        }

        vmCall(
            Void, operation, weakPointer(globalObject),
            lowJSValue(child1), lowJSValue(child2), lowJSValue(child3));
        return;
    }

    default:
        break;
    }

    LValue base = lowCell(child1);
    LValue index = lowInt32(child2);
    LValue storage = lowStorage(child4);

    switch (arrayMode.type()) {
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous: {
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock outerLastNext = m_out.appendTo(m_out.m_block, continuation);

        switch (arrayMode.type()) {
        case Array::Int32:
        case Array::Contiguous: {
            LValue value = lowJSValue(child3, ManualOperandSpeculation);

            // Int32 arrays only hold int32 JSValues; deopt on anything else.
            if (arrayMode.type() == Array::Int32)
                FTL_TYPE_CHECK(jsValueValue(value), child3, SpecInt32Only, isNotInt32(value));

            TypedPointer elementPointer = m_out.baseIndex(
                arrayMode.type() == Array::Int32 ?
                m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties,
                storage, m_out.zeroExtPtr(index), provenValue(child2));

            // PutByValAlias is proven in-bounds: store without any checks.
            if (m_node->op() == PutByValAlias) {
                m_out.store64(value, elementPointer);
                break;
            }

            // Emits the bounds/hole checks and the beyond-bounds slow call;
            // the fast path falls through to the store below.
            contiguousPutByValOutOfBounds(
                m_node->ecmaMode().isStrict()
                    ? (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsStrict)
                    : (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsNonStrict : operationPutByValBeyondArrayBoundsNonStrict),
                base, storage, index, value, continuation);

            m_out.store64(value, elementPointer);
            break;
        }

        case Array::Double: {
            LValue value = lowDouble(child3);

            // Double arrays cannot hold NaN payloads that alias the hole
            // representation; deopt on impure NaN.
            FTL_TYPE_CHECK(
                doubleValue(value), child3, SpecDoubleReal,
                m_out.doubleNotEqualOrUnordered(value, value));

            TypedPointer elementPointer = m_out.baseIndex(
                m_heaps.indexedDoubleProperties, storage, m_out.zeroExtPtr(index),
                provenValue(child2));

            if (m_node->op() == PutByValAlias) {
                m_out.storeDouble(value, elementPointer);
                break;
            }

            contiguousPutByValOutOfBounds(
                m_node->ecmaMode().isStrict()
                    ? (m_node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsStrict)
                    : (m_node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsNonStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict),
                base, storage, index, value, continuation);

            m_out.storeDouble(value, elementPointer);
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
        }

        m_out.jump(continuation);
        m_out.appendTo(continuation, outerLastNext);
        return;
    }

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        LValue value = lowJSValue(child3);

        TypedPointer elementPointer = m_out.baseIndex(
            m_heaps.ArrayStorage_vector, storage, m_out.zeroExtPtr(index),
            provenValue(child2));

        // Proven in-bounds alias store: no checks needed.
        if (m_node->op() == PutByValAlias) {
            m_out.store64(value, elementPointer);
            return;
        }

        // In-bounds mode: a zero slot is a hole; writing one would require
        // bookkeeping, so deopt instead.
        if (arrayMode.isInBounds()) {
            speculate(StoreToHole, noValue(), nullptr, m_out.isZero64(m_out.load64(elementPointer)));
            m_out.store64(value, elementPointer);
            return;
        }

        LValue isOutOfBounds = m_out.aboveOrEqual(
            index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_vectorLength));

        auto slowPathFunction = m_node->ecmaMode().isStrict()
            ? (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsStrict)
            : (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsNonStrict : operationPutByValBeyondArrayBoundsNonStrict);
        // If the mode does not tolerate out-of-bounds stores, deopt on them
        // and treat the condition as statically false afterwards.
        if (!arrayMode.isOutOfBounds()) {
            speculate(OutOfBounds, noValue(), nullptr, isOutOfBounds);
            isOutOfBounds = m_out.booleanFalse;
        }

        LBasicBlock inBoundCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock holeCase = m_out.newBlock();
        LBasicBlock doStoreCase = m_out.newBlock();
        LBasicBlock lengthUpdateCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isOutOfBounds, rarely(slowCase), usually(inBoundCase));

        // Slow case: beyond the vector length (or, for slow-put storage,
        // storing into a hole) goes through the runtime.
        LBasicBlock lastNext = m_out.appendTo(slowCase, inBoundCase);
        vmCall(
            Void, slowPathFunction,
            weakPointer(globalObject), base, index, value);
        m_out.jump(continuation);

        if (arrayMode.isSlowPut()) {
            // Slow-put storage: holes must be handled by the runtime (the
            // prototype chain may intercept the store).
            m_out.appendTo(inBoundCase, doStoreCase);
            m_out.branch(m_out.isZero64(m_out.load64(elementPointer)), rarely(slowCase), usually(doStoreCase));
        } else {
            // Regular storage: filling a hole bumps numValuesInVector and
            // may need to extend publicLength.
            m_out.appendTo(inBoundCase, holeCase);
            m_out.branch(m_out.isZero64(m_out.load64(elementPointer)), rarely(holeCase), usually(doStoreCase));

            m_out.appendTo(holeCase, lengthUpdateCase);
            m_out.store32(
                m_out.add(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.int32One),
                storage, m_heaps.ArrayStorage_numValuesInVector);
            m_out.branch(
                m_out.below(
                    index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_publicLength)),
                unsure(doStoreCase), unsure(lengthUpdateCase));

            m_out.appendTo(lengthUpdateCase, doStoreCase);
            m_out.store32(
                m_out.add(index, m_out.int32One),
                storage, m_heaps.ArrayStorage_publicLength);
            m_out.jump(doStoreCase);
        }

        m_out.appendTo(doStoreCase, continuation);
        m_out.store64(value, elementPointer);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return;
    }

    case Array::Int8Array:
    case Array::Int16Array:
    case Array::Int32Array:
    case Array::Uint8Array:
    case Array::Uint8ClampedArray:
    case Array::Uint16Array:
    case Array::Uint32Array:
    case Array::Float32Array:
    case Array::Float64Array: {
        TypedArrayType type = arrayMode.typedArrayType();

        ASSERT(isTypedView(type));
        {
            // Element address: storage + (index << log2(elementSize)).
            TypedPointer pointer = TypedPointer(
                m_heaps.typedArrayProperties,
                m_out.add(
                    storage,
                    m_out.shl(
                        m_out.zeroExt(index, pointerType()),
                        m_out.constIntPtr(logElementSize(type)))));

            LValue valueToStore;

            if (isInt(type)) {
                LValue intValue = getIntTypedArrayStoreOperand(child3, isClamped(type));

                valueToStore = intValue;
            } else /* !isInt(type) */ {
                LValue value = lowDouble(child3);
                switch (type) {
                case TypeFloat32:
                    valueToStore = m_out.doubleToFloat(value);
                    break;
                case TypeFloat64:
                    valueToStore = value;
                    break;
                default:
                    DFG_CRASH(m_graph, m_node, "Bad typed array type");
                }
            }

            if (arrayMode.isInBounds() || m_node->op() == PutByValAlias)
                m_out.store(valueToStore, pointer, storeType(type));
            else {
                // Out-of-bounds typed-array stores are silently dropped,
                // but a detached buffer must still throw — hence the
                // detachment check on the out-of-bounds path. child5 holds
                // the array length to compare against.
                LBasicBlock isInBounds = m_out.newBlock();
                LBasicBlock isOutOfBounds = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.aboveOrEqual(index, lowInt32(child5)),
                    unsure(isOutOfBounds), unsure(isInBounds));

                LBasicBlock lastNext = m_out.appendTo(isInBounds, isOutOfBounds);
                m_out.store(valueToStore, pointer, storeType(type));
                m_out.jump(continuation);

                m_out.appendTo(isOutOfBounds, continuation);
                speculateTypedArrayIsNotDetached(base);
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
            }

            // We have to keep base alive since that keeps storage alive.
            ensureStillAliveHere(base);
            return;
        }
    }

    case Array::AnyTypedArray:
    case Array::String:
    case Array::DirectArguments:
    case Array::ForceExit:
    case Array::Generic:
    case Array::ScopedArguments:
    case Array::SelectUsingArguments:
    case Array::SelectUsingPredictions:
    case Array::Undecided:
    case Array::Unprofiled:
    case Array::BigInt64Array:
    case Array::BigUint64Array:
        DFG_CRASH(m_graph, m_node, "Bad array type");
        break;
    }
}
5859
5860 void compilePutAccessorById()
5861 {
5862 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
5863 LValue base = lowCell(m_node->child1());
5864 LValue accessor = lowCell(m_node->child2());
5865 auto uid = m_graph.identifiers()[m_node->identifierNumber()];
5866 vmCall(
5867 Void,
5868 m_node->op() == PutGetterById ? operationPutGetterById : operationPutSetterById,
5869 weakPointer(globalObject), base, m_out.constIntPtr(uid), m_out.constInt32(m_node->accessorAttributes()), accessor);
5870 }
5871
5872 void compilePutGetterSetterById()
5873 {
5874 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
5875 LValue base = lowCell(m_node->child1());
5876 LValue getter = lowJSValue(m_node->child2());
5877 LValue setter = lowJSValue(m_node->child3());
5878 auto uid = m_graph.identifiers()[m_node->identifierNumber()];
5879 vmCall(
5880 Void, operationPutGetterSetter,
5881 weakPointer(globalObject), base, m_out.constIntPtr(uid), m_out.constInt32(m_node->accessorAttributes()), getter, setter);
5882
5883 }
5884
5885 void compilePutAccessorByVal()
5886 {
5887 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
5888 LValue base = lowCell(m_node->child1());
5889 LValue subscript = lowJSValue(m_node->child2());
5890 LValue accessor = lowCell(m_node->child3());
5891 vmCall(
5892 Void,
5893 m_node->op() == PutGetterByVal ? operationPutGetterByVal : operationPutSetterByVal,
5894 weakPointer(globalObject), base, subscript, m_out.constInt32(m_node->accessorAttributes()), accessor);
5895 }
5896
5897 template<DelByKind kind, typename SubscriptKind>
5898 void compileDelBy(LValue base, SubscriptKind subscriptValue)
5899 {
5900 PatchpointValue* patchpoint;
5901 if constexpr (kind == DelByKind::ById) {
5902 patchpoint = m_out.patchpoint(Int64);
5903 patchpoint->append(ConstrainedValue(base, ValueRep::SomeLateRegister));
5904 } else {
5905 patchpoint = m_out.patchpoint(Int64);
5906 patchpoint->append(ConstrainedValue(base, ValueRep::SomeLateRegister));
5907 patchpoint->append(ConstrainedValue(subscriptValue, ValueRep::SomeLateRegister));
5908 }
5909 patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
5910 patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
5911 patchpoint->clobber(RegisterSet::macroScratchRegisters());
5912 patchpoint->numGPScratchRegisters = JITCode::useDataIC(JITType::FTLJIT) ? 2 : 1;
5913
5914 RefPtr<PatchpointExceptionHandle> exceptionHandle =
5915 preparePatchpointForExceptions(patchpoint);
5916
5917 State* state = &m_ftlState;
5918 Node* node = m_node;
5919 CodeOrigin nodeSemanticOrigin = node->origin.semantic;
5920 UseKind child1UseKind = node->child1().useKind();
5921 UseKind child2UseKind = UntypedUse;
5922 if constexpr (kind != DelByKind::ById)
5923 child2UseKind = node->child2().useKind();
5924 auto ecmaMode = node->ecmaMode().value();
5925 patchpoint->setGenerator(
5926 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
5927 AllowMacroScratchRegisterUsage allowScratch(jit);
5928
5929 CallSiteIndex callSiteIndex =
5930 state->jitCode->common.codeOrigins->addUniqueCallSiteIndex(nodeSemanticOrigin);
5931
5932 Box<CCallHelpers::JumpList> exceptions =
5933 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
5934 CCallHelpers::JumpList slowCases;
5935
5936 auto base = JSValueRegs(params[1].gpr());
5937 auto returnGPR = params[0].gpr();
5938 auto scratchGPR = params.gpScratch(0);
5939 auto stubInfoGPR = JITCode::useDataIC(JITType::FTLJIT) ? params.gpScratch(1) : InvalidGPRReg;
5940 ASSERT(base.gpr() != returnGPR);
5941 ASSERT(base.gpr() != scratchGPR);
5942 ASSERT(returnGPR != scratchGPR);
5943
5944 if (child1UseKind)
5945 slowCases.append(jit.branchIfNotCell(base));
5946
5947 constexpr auto optimizationFunction = [&] () {
5948 if constexpr (kind == DelByKind::ById)
5949 return operationDeleteByIdOptimize;
5950 else
5951 return operationDeleteByValOptimize;
5952 }();
5953
5954 const auto subscript = [&] {
5955 if constexpr (kind == DelByKind::ById)
5956 return CCallHelpers::TrustedImmPtr(subscriptValue.rawBits());
5957 else {
5958 ASSERT(scratchGPR != params[2].gpr());
5959 if (child2UseKind == UntypedUse)
5960 slowCases.append(jit.branchIfNotCell(JSValueRegs(params[2].gpr())));
5961 return JSValueRegs(params[2].gpr());
5962 }
5963 }();
5964
5965 const auto generator = [&] {
5966 if constexpr (kind == DelByKind::ById) {
5967 return Box<JITDelByIdGenerator>::create(
5968 jit.codeBlock(), JITType::FTLJIT, nodeSemanticOrigin, callSiteIndex,
5969 params.unavailableRegisters(), subscriptValue, base,
5970 JSValueRegs(returnGPR), stubInfoGPR, scratchGPR);
5971 } else {
5972 return Box<JITDelByValGenerator>::create(
5973 jit.codeBlock(), JITType::FTLJIT, nodeSemanticOrigin, callSiteIndex,
5974 params.unavailableRegisters(), base,
5975 subscript, JSValueRegs(returnGPR), stubInfoGPR, scratchGPR);
5976 }
5977 }();
5978
5979 generator->generateFastPath(jit);
5980 if (!JITCode::useDataIC(JITType::FTLJIT))
5981 slowCases.append(generator->slowPathJump());
5982 CCallHelpers::Label done = jit.label();
5983
5984 params.addLatePath(
5985 [=] (CCallHelpers& jit) {
5986 AllowMacroScratchRegisterUsage allowScratch(jit);
5987
5988 slowCases.link(&jit);
5989 CCallHelpers::Label slowPathBegin = jit.label();
5990 CCallHelpers::Call slowPathCall;
5991 if (JITCode::useDataIC(JITType::FTLJIT)) {
5992 jit.move(CCallHelpers::TrustedImmPtr(generator->stubInfo()), stubInfoGPR);
5993 generator->stubInfo()->m_slowOperation = optimizationFunction;
5994 slowPathCall = callOperation(
5995 *state, params.unavailableRegisters(), jit, nodeSemanticOrigin,
5996 exceptions.get(), CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), returnGPR,
5997 jit.codeBlock()->globalObjectFor(nodeSemanticOrigin),
5998 stubInfoGPR, base,
5999 subscript, CCallHelpers::TrustedImm32(ecmaMode)).call();
6000 } else {
6001 slowPathCall = callOperation(
6002 *state, params.unavailableRegisters(), jit, nodeSemanticOrigin,
6003 exceptions.get(), optimizationFunction, returnGPR,
6004 jit.codeBlock()->globalObjectFor(nodeSemanticOrigin),
6005 CCallHelpers::TrustedImmPtr(generator->stubInfo()), base,
6006 subscript, CCallHelpers::TrustedImm32(ecmaMode)).call();
6007 }
6008 jit.jump().linkTo(done, &jit);
6009
6010 generator->reportSlowPathCall(slowPathBegin, slowPathCall);
6011
6012 jit.addLinkTask(
6013 [=] (LinkBuffer& linkBuffer) {
6014 generator->finalize(linkBuffer, linkBuffer);
6015 });
6016 });
6017 });
6018
6019 setBoolean(m_out.notZero64(patchpoint));
6020 }
6021
6022 void compileDeleteById()
6023 {
6024 switch (m_node->child1().useKind()) {
6025 case CellUse: {
6026 LValue base = lowCell(m_node->child1());
6027 compileDelBy<DelByKind::ById>(base, m_node->cacheableIdentifier());
6028 break;
6029 }
6030
6031 case UntypedUse: {
6032 // FIXME: We should use IC even if child1 is UntypedUse. In that case, we should emit write-barrier after tha fast path of IC.
6033 // https://bugs.webkit.org/show_bug.cgi?id=209397
6034 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
6035 LValue base = lowJSValue(m_node->child1());
6036 setBoolean(m_out.notZero64(vmCall(Int64, operationDeleteByIdGeneric, weakPointer(globalObject), m_out.intPtrZero, base, m_out.constIntPtr(m_node->cacheableIdentifier().rawBits()), m_out.constInt32(m_node->ecmaMode().value()))));
6037 break;
6038 }
6039
6040 default:
6041 DFG_CRASH(m_graph, m_node, "Bad use kind");
6042 return;
6043 }
6044 }
6045
6046 void compileDeleteByVal()
6047 {
6048 switch (m_node->child1().useKind()) {
6049 case CellUse: {
6050 LValue base = lowCell(m_node->child1());
6051 LValue subscript;
6052 switch (m_node->child2().useKind()) {
6053 case CellUse: {
6054 subscript = lowCell(m_node->child2());
6055 break;
6056 }
6057
6058 case UntypedUse: {
6059 subscript = lowJSValue(m_node->child2());
6060 break;
6061 }
6062
6063 default:
6064 DFG_CRASH(m_graph, m_node, "Bad use kind");
6065 return;
6066 }
6067 compileDelBy<DelByKind::ByVal>(base, subscript);
6068 return;
6069 }
6070
6071 case UntypedUse: {
6072 // FIXME: We should use IC even if child1 is UntypedUse. In that case, we should emit write-barrier after tha fast path of IC.
6073 // https://bugs.webkit.org/show_bug.cgi?id=209397
6074 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
6075 LValue base = lowJSValue(m_node->child1());
6076 LValue subscript = lowJSValue(m_node->child2());
6077 setBoolean(m_out.notZero64(vmCall(Int64, operationDeleteByValGeneric, weakPointer(globalObject), m_out.intPtrZero, base, subscript, m_out.constInt32(m_node->ecmaMode().value()))));
6078 return;
6079 }
6080
6081 default:
6082 DFG_CRASH(m_graph, m_node, "Bad use kind");
6083 return;
6084 }
6085 }
6086
void compileArrayPush()
{
    // Lowers ArrayPush for Int32/Contiguous/Double butterflies and for
    // ArrayStorage. Var-arg children: [0] storage, [1] base array,
    // [2..] the elements to push. Result is the new length, boxed.
    JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
    LValue base = lowCell(m_graph.varArgChild(m_node, 1));
    LValue storage = lowStorage(m_graph.varArgChild(m_node, 0));
    unsigned elementOffset = 2;
    unsigned elementCount = m_node->numChildren() - elementOffset;

    switch (m_node->arrayMode().type()) {
    case Array::Int32:
    case Array::Contiguous:
    case Array::Double: {
        IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type());

        if (elementCount == 1) {
            // Single-element push: store in place when there is vector
            // capacity, otherwise call the runtime to grow the array.
            LValue value;
            Output::StoreType storeType;

            Edge& element = m_graph.varArgChild(m_node, elementOffset);
            speculate(element);
            if (m_node->arrayMode().type() != Array::Double) {
                value = lowJSValue(element, ManualOperandSpeculation);
                storeType = Output::Store64;
            } else {
                value = lowDouble(element);
                storeType = Output::StoreDouble;
            }

            LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);

            LBasicBlock fastPath = m_out.newBlock();
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                m_out.aboveOrEqual(
                    prevLength, m_out.load32(storage, m_heaps.Butterfly_vectorLength)),
                unsure(slowPath), unsure(fastPath));

            LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
            m_out.store(
                value, m_out.baseIndex(heap, storage, m_out.zeroExtPtr(prevLength)), storeType);
            LValue newLength = m_out.add(prevLength, m_out.int32One);
            m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);

            ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));
            m_out.jump(continuation);

            m_out.appendTo(slowPath, continuation);
            LValue result;
            if (m_node->arrayMode().type() != Array::Double)
                result = vmCall(Int64, operationArrayPush, weakPointer(globalObject), value, base);
            else
                result = vmCall(Int64, operationArrayPushDouble, weakPointer(globalObject), value, base);
            ValueFromBlock slowResult = m_out.anchor(result);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        // Multi-element push: speculate on every element up front, then
        // write them either directly into the butterfly (fast path) or into
        // a scratch buffer that a single runtime call consumes (slow path).
        for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
            Edge element = m_graph.varArgChild(m_node, elementIndex + elementOffset);
            speculate(element);
        }

        LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);
        LValue newLength = m_out.add(prevLength, m_out.constInt32(elementCount));

        LBasicBlock fastPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock setup = m_out.newBlock();
        LBasicBlock slowCallPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue beyondVectorLength = m_out.above(newLength, m_out.load32(storage, m_heaps.Butterfly_vectorLength));

        m_out.branch(beyondVectorLength, unsure(slowPath), unsure(fastPath));

        // Fast path: bump the public length and write straight into the
        // butterfly starting at the old length.
        LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
        m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);
        ValueFromBlock fastBufferResult = m_out.anchor(m_out.baseIndex(storage, m_out.zeroExtPtr(prevLength), ScaleEight));
        m_out.jump(setup);

        // Slow path: stage the elements in a VM scratch buffer instead.
        m_out.appendTo(slowPath, setup);
        size_t scratchSize = sizeof(EncodedJSValue) * elementCount;
        static_assert(sizeof(EncodedJSValue) == sizeof(double), "");
        ASSERT(scratchSize);
        ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
        ValueFromBlock slowBufferResult = m_out.anchor(m_out.constIntPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer())));
        m_out.jump(setup);

        // Shared store loop: `buffer` is either the butterfly tail or the
        // scratch buffer.
        m_out.appendTo(setup, slowCallPath);
        LValue buffer = m_out.phi(pointerType(), fastBufferResult, slowBufferResult);
        for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
            Edge& element = m_graph.varArgChild(m_node, elementIndex + elementOffset);

            LValue value;
            Output::StoreType storeType;
            if (m_node->arrayMode().type() != Array::Double) {
                value = lowJSValue(element, ManualOperandSpeculation);
                storeType = Output::Store64;
            } else {
                value = lowDouble(element);
                storeType = Output::StoreDouble;
            }

            m_out.store(value, m_out.baseIndex(heap, buffer, m_out.constInt32(elementIndex), jsNumber(elementIndex)), storeType);
        }
        ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));

        // If we staged into the scratch buffer, hand it to the runtime to
        // perform the actual push.
        m_out.branch(beyondVectorLength, unsure(slowCallPath), unsure(continuation));

        m_out.appendTo(slowCallPath, continuation);
        auto* operation = &operationArrayPushMultiple;
        if (m_node->arrayMode().type() == Array::Double)
            operation = &operationArrayPushDoubleMultiple;
        ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, operation, weakPointer(globalObject), base, buffer, m_out.constInt32(elementCount)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
        return;
    }

    case Array::ArrayStorage: {
        // This ensures that the result of ArrayPush is Int32 in AI.
        int32_t largestPositiveInt32Length = 0x7fffffff - elementCount;

        LValue prevLength = m_out.load32(storage, m_heaps.ArrayStorage_publicLength);
        // Refuse to handle bizarre lengths.
        speculate(Uncountable, noValue(), nullptr, m_out.above(prevLength, m_out.constInt32(largestPositiveInt32Length)));

        if (elementCount == 1) {
            // Single-element push into ArrayStorage: also maintains
            // numValuesInVector on the fast path.
            Edge& element = m_graph.varArgChild(m_node, elementOffset);

            LValue value = lowJSValue(element);

            LBasicBlock fastPath = m_out.newBlock();
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                m_out.aboveOrEqual(
                    prevLength, m_out.load32(storage, m_heaps.ArrayStorage_vectorLength)),
                rarely(slowPath), usually(fastPath));

            LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
            m_out.store64(
                value, m_out.baseIndex(m_heaps.ArrayStorage_vector, storage, m_out.zeroExtPtr(prevLength)));
            LValue newLength = m_out.add(prevLength, m_out.int32One);
            m_out.store32(newLength, storage, m_heaps.ArrayStorage_publicLength);
            m_out.store32(
                m_out.add(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.int32One),
                storage, m_heaps.ArrayStorage_numValuesInVector);

            ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));
            m_out.jump(continuation);

            m_out.appendTo(slowPath, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                vmCall(Int64, operationArrayPush, weakPointer(globalObject), value, base));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        // Multi-element push into ArrayStorage: same fast/scratch-buffer
        // scheme as the butterfly case above, with the extra
        // numValuesInVector bookkeeping.
        LValue newLength = m_out.add(prevLength, m_out.constInt32(elementCount));

        LBasicBlock fastPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock setup = m_out.newBlock();
        LBasicBlock slowCallPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue beyondVectorLength = m_out.above(newLength, m_out.load32(storage, m_heaps.ArrayStorage_vectorLength));

        m_out.branch(beyondVectorLength, rarely(slowPath), usually(fastPath));

        LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
        m_out.store32(newLength, storage, m_heaps.ArrayStorage_publicLength);
        m_out.store32(
            m_out.add(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.constInt32(elementCount)),
            storage, m_heaps.ArrayStorage_numValuesInVector);
        ValueFromBlock fastBufferResult = m_out.anchor(m_out.baseIndex(storage, m_out.zeroExtPtr(prevLength), ScaleEight, ArrayStorage::vectorOffset()));
        m_out.jump(setup);

        m_out.appendTo(slowPath, setup);
        size_t scratchSize = sizeof(EncodedJSValue) * elementCount;
        ASSERT(scratchSize);
        ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
        ValueFromBlock slowBufferResult = m_out.anchor(m_out.constIntPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer())));
        m_out.jump(setup);

        m_out.appendTo(setup, slowCallPath);
        LValue buffer = m_out.phi(pointerType(), fastBufferResult, slowBufferResult);
        for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
            Edge& element = m_graph.varArgChild(m_node, elementIndex + elementOffset);

            LValue value = lowJSValue(element);
            m_out.store64(value, m_out.baseIndex(m_heaps.ArrayStorage_vector.atAnyIndex(), buffer, m_out.constIntPtr(elementIndex), ScaleEight));
        }
        ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));

        m_out.branch(beyondVectorLength, rarely(slowCallPath), usually(continuation));

        m_out.appendTo(slowCallPath, continuation);
        ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, operationArrayPushMultiple, weakPointer(globalObject), base, buffer, m_out.constInt32(elementCount)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
        return;
    }

    default:
        DFG_CRASH(m_graph, m_node, "Bad array type");
        return;
    }
}
6310
6311 std::pair<LValue, LValue> populateSliceRange(LValue start, LValue end, LValue length)
6312 {
6313 // end can be nullptr.
6314 ASSERT(start);
6315 ASSERT(length);
6316
6317 auto pickIndex = [&] (LValue index) {
6318 return m_out.select(m_out.greaterThanOrEqual(index, m_out.int32Zero),
6319 m_out.select(m_out.above(index, length), length, index),
6320 m_out.select(m_out.lessThan(m_out.add(length, index), m_out.int32Zero), m_out.int32Zero, m_out.add(length, index)));
6321 };
6322
6323 LValue endBoundary = length;
6324 if (end)
6325 endBoundary = pickIndex(end);
6326 LValue startIndex = pickIndex(start);
6327 return std::make_pair(startIndex, endBoundary);
6328 }
6329
    void compileArraySlice()
    {
        // Lowers ArraySlice: allocates a new JSArray and copies the clamped
        // range [startIndex, endBoundary) out of the source array's butterfly.
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

        // varArg children: 0 = array, last = storage; children 1 and 2 (when
        // present) are the start and end bounds.
        LValue sourceArray = lowCell(m_graph.varArgChild(m_node, 0));
        LValue sourceStorage = lowStorage(m_graph.varArgChild(m_node, m_node->numChildren() - 1));
        LValue inputLength = m_out.load32(sourceStorage, m_heaps.Butterfly_publicLength);

        LValue startIndex = nullptr;
        LValue resultLength = nullptr;
        if (m_node->numChildren() == 2) {
            // No explicit bounds: the slice covers the whole array.
            startIndex = m_out.constInt32(0);
            resultLength = inputLength;
        } else {
            LValue start = lowInt32(m_graph.varArgChild(m_node, 1));
            LValue end = nullptr;
            if (m_node->numChildren() != 3)
                end = lowInt32(m_graph.varArgChild(m_node, 2));

            auto range = populateSliceRange(start, end, inputLength);
            startIndex = range.first;
            LValue endBoundary = range.second;

            // A start at or past the end yields an empty result.
            resultLength = m_out.select(m_out.belowOrEqual(startIndex, endBoundary),
                m_out.sub(endBoundary, startIndex),
                m_out.constInt32(0));
        }

        ArrayValues arrayResult;
        {
            LValue indexingType = m_out.load8ZeroExt32(sourceArray, m_heaps.JSCell_indexingTypeAndMisc);
            // We can ignore the writability of the cell since we won't write to the source.
            indexingType = m_out.bitAnd(indexingType, m_out.constInt32(AllWritableArrayTypesAndHistory));
            // When we emit an ArraySlice, we dominate the use of the array by a CheckStructure
            // that ensures the incoming array has one of the original array structures
            // with one of the following indexing shapes: Int32, Contiguous, Double.
            // Pick the result structure that matches the source's shape.
            LValue structure = m_out.select(
                m_out.equal(indexingType, m_out.constInt32(ArrayWithInt32)),
                weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithInt32))),
                m_out.select(m_out.equal(indexingType, m_out.constInt32(ArrayWithContiguous)),
                    weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithContiguous))),
                    weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithDouble)))));
            arrayResult = allocateJSArray(resultLength, resultLength, structure, indexingType, false, false);
        }

        // Keep the sourceArray alive at least until after anything that can GC.
        ensureStillAliveHere(sourceArray);

        // Copy loop: one 8-byte element per iteration from the source
        // butterfly (starting at startIndex) into the new butterfly (starting
        // at 0). The same code works for all three shapes since each stores
        // 8-byte elements.
        LBasicBlock loop = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        resultLength = m_out.zeroExtPtr(resultLength);
        ValueFromBlock startLoadIndex = m_out.anchor(m_out.zeroExtPtr(startIndex));
        ValueFromBlock startStoreIndex = m_out.anchor(m_out.constIntPtr(0));

        // Skip the loop entirely when the result is empty.
        m_out.branch(
            m_out.below(m_out.constIntPtr(0), resultLength), unsure(loop), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(loop, continuation);
        LValue storeIndex = m_out.phi(pointerType(), startStoreIndex);
        LValue loadIndex = m_out.phi(pointerType(), startLoadIndex);
        LValue value = m_out.load64(m_out.baseIndex(m_heaps.root, sourceStorage, loadIndex, ScaleEight));
        m_out.store64(value, m_out.baseIndex(m_heaps.root, arrayResult.butterfly, storeIndex, ScaleEight));
        LValue nextStoreIndex = m_out.add(storeIndex, m_out.constIntPtr(1));
        m_out.addIncomingToPhi(storeIndex, m_out.anchor(nextStoreIndex));
        m_out.addIncomingToPhi(loadIndex, m_out.anchor(m_out.add(loadIndex, m_out.constIntPtr(1))));
        m_out.branch(
            m_out.below(nextStoreIndex, resultLength), unsure(loop), unsure(continuation));

        m_out.appendTo(continuation, lastNext);

        // Fence after all initializing stores, before the array is exposed.
        mutatorFence();
        setJSValue(arrayResult.array);
    }
6404
    void compileArrayIndexOf()
    {
        // Lowers ArrayIndexOf. For keys whose 64-bit pattern (or double value)
        // can be compared directly we emit an inline scan loop over the
        // butterfly; string keys and fully untyped keys call into the runtime.
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        // With 3 children the storage edge is child 2; with 4 children child 2
        // is the fromIndex and storage is child 3.
        LValue storage = lowStorage(m_node->numChildren() == 3 ? m_graph.varArgChild(m_node, 2) : m_graph.varArgChild(m_node, 3));
        LValue length = m_out.load32(storage, m_heaps.Butterfly_publicLength);

        // Clamp the optional fromIndex: non-negative values are capped at
        // length; negative values count back from length and floor at zero.
        LValue startIndex;
        if (m_node->numChildren() == 4) {
            startIndex = lowInt32(m_graph.varArgChild(m_node, 2));
            startIndex = m_out.select(m_out.greaterThanOrEqual(startIndex, m_out.int32Zero),
                m_out.select(m_out.above(startIndex, length), length, startIndex),
                m_out.select(m_out.lessThan(m_out.add(length, startIndex), m_out.int32Zero), m_out.int32Zero, m_out.add(length, startIndex)));
        } else
            startIndex = m_out.int32Zero;

        Edge& searchElementEdge = m_graph.varArgChild(m_node, 1);
        switch (searchElementEdge.useKind()) {
        case Int32Use:
        case ObjectUse:
        case SymbolUse:
        case OtherUse:
        case DoubleRepUse: {
            LBasicBlock loopHeader = m_out.newBlock();
            LBasicBlock loopBody = m_out.newBlock();
            LBasicBlock loopNext = m_out.newBlock();
            LBasicBlock notFound = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Lower the search key to a directly comparable representation;
            // each use kind asserts the array shape it is expected alongside.
            LValue searchElement;
            switch (searchElementEdge.useKind()) {
            case Int32Use:
                ASSERT(m_node->arrayMode().type() == Array::Int32);
                speculate(searchElementEdge);
                searchElement = lowJSValue(searchElementEdge, ManualOperandSpeculation);
                break;
            case ObjectUse:
                ASSERT(m_node->arrayMode().type() == Array::Contiguous);
                searchElement = lowObject(searchElementEdge);
                break;
            case SymbolUse:
                ASSERT(m_node->arrayMode().type() == Array::Contiguous);
                searchElement = lowSymbol(searchElementEdge);
                break;
            case OtherUse:
                ASSERT(m_node->arrayMode().type() == Array::Contiguous);
                speculate(searchElementEdge);
                searchElement = lowJSValue(searchElementEdge, ManualOperandSpeculation);
                break;
            case DoubleRepUse:
                ASSERT(m_node->arrayMode().type() == Array::Double);
                searchElement = lowDouble(searchElementEdge);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            // The loop indexes with pointer-width values.
            startIndex = m_out.zeroExtPtr(startIndex);
            length = m_out.zeroExtPtr(length);

            ValueFromBlock initialStartIndex = m_out.anchor(startIndex);
            m_out.jump(loopHeader);

            // loopHeader: index phi; exit to notFound once index == length.
            LBasicBlock lastNext = m_out.appendTo(loopHeader, loopBody);
            LValue index = m_out.phi(pointerType(), initialStartIndex);
            m_out.branch(m_out.notEqual(index, length), unsure(loopBody), unsure(notFound));

            // loopBody: compare the current element against the key; on a
            // match, exit with the current index (anchored as foundResult).
            m_out.appendTo(loopBody, loopNext);
            ValueFromBlock foundResult = m_out.anchor(index);
            switch (searchElementEdge.useKind()) {
            case Int32Use: {
                // Empty value is ignored because of JSValue::NumberTag.
                LValue value = m_out.load64(m_out.baseIndex(m_heaps.indexedInt32Properties, storage, index));
                m_out.branch(m_out.equal(value, searchElement), unsure(continuation), unsure(loopNext));
                break;
            }
            case ObjectUse:
            case SymbolUse:
            case OtherUse: {
                // Empty value never matches against non-empty JS values.
                LValue value = m_out.load64(m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, index));
                m_out.branch(m_out.equal(value, searchElement), unsure(continuation), unsure(loopNext));
                break;
            }
            case DoubleRepUse: {
                // Empty value is ignored because of NaN.
                LValue value = m_out.loadDouble(m_out.baseIndex(m_heaps.indexedDoubleProperties, storage, index));
                m_out.branch(m_out.doubleEqual(value, searchElement), unsure(continuation), unsure(loopNext));
                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            m_out.appendTo(loopNext, notFound);
            LValue nextIndex = m_out.add(index, m_out.intPtrOne);
            m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
            m_out.jump(loopHeader);

            m_out.appendTo(notFound, continuation);
            ValueFromBlock notFoundResult = m_out.anchor(m_out.constIntPtr(-1));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            // We have to keep base alive since that keeps content of storage alive.
            ensureStillAliveHere(base);
            setInt32(m_out.castToInt32(m_out.phi(pointerType(), notFoundResult, foundResult)));
            return;
        }

        case StringUse:
            // String comparison needs deep equality, so go to the runtime.
            ASSERT(m_node->arrayMode().type() == Array::Contiguous);
            // We have to keep base alive since that keeps storage alive.
            ensureStillAliveHere(base);
            setInt32(m_out.castToInt32(vmCall(Int64, operationArrayIndexOfString, weakPointer(globalObject), storage, lowString(searchElementEdge), startIndex)));
            return;

        case UntypedUse:
            // Fully generic key: pick the runtime variant matching the
            // array's storage shape.
            switch (m_node->arrayMode().type()) {
            case Array::Double:
                setInt32(m_out.castToInt32(vmCall(Int64, operationArrayIndexOfValueDouble, weakPointer(globalObject), storage, lowJSValue(searchElementEdge), startIndex)));
                return;
            case Array::Contiguous:
                // We have to keep base alive since that keeps content of storage alive.
                ensureStillAliveHere(base);
                FALLTHROUGH;
            case Array::Int32:
                setInt32(m_out.castToInt32(vmCall(Int64, operationArrayIndexOfValueInt32OrContiguous, weakPointer(globalObject), storage, lowJSValue(searchElementEdge), startIndex)));
                return;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                return;
            }
            return;

        default:
            RELEASE_ASSERT_NOT_REACHED();
            return;
        }
    }
6547
6548
    void compileArrayPop()
    {
        // Lowers ArrayPop. Fast path: decrement publicLength and read the last
        // slot in place; defer to the runtime when the pop cannot be done
        // inline (hole at the end, or — for ArrayStorage — a length outside
        // the vector).
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        LValue base = lowCell(m_node->child1());
        LValue storage = lowStorage(m_node->child2());

        switch (m_node->arrayMode().type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous: {
            IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type());

            LBasicBlock fastCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);

            // Result 1 of 3: popping an empty array yields undefined.
            Vector<ValueFromBlock, 3> results;
            results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
            m_out.branch(
                m_out.isZero32(prevLength), rarely(continuation), usually(fastCase));

            LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);
            LValue newLength = m_out.sub(prevLength, m_out.int32One);
            m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);
            TypedPointer pointer = m_out.baseIndex(heap, storage, m_out.zeroExtPtr(newLength));
            if (m_node->arrayMode().type() != Array::Double) {
                LValue result = m_out.load64(pointer);
                // We have to keep base alive to keep content in storage alive.
                if (m_node->arrayMode().type() == Array::Contiguous)
                    ensureStillAliveHere(base);
                // Clear the popped slot, then check for a hole: a zero
                // (empty) value means we must take the slow path.
                m_out.store64(m_out.int64Zero, pointer);
                results.append(m_out.anchor(result));
                m_out.branch(
                    m_out.notZero64(result), usually(continuation), rarely(slowCase));
            } else {
                LValue result = m_out.loadDouble(pointer);
                // Clear the popped slot with PNaN (the empty marker for
                // double arrays). result != result only for NaN, i.e. a hole.
                m_out.store64(m_out.constInt64(bitwise_cast<int64_t>(PNaN)), pointer);
                results.append(m_out.anchor(boxDouble(result)));
                m_out.branch(
                    m_out.doubleEqual(result, result),
                    usually(continuation), rarely(slowCase));
            }

            // Result 3 of 3: runtime pop, which also restores the length we
            // already decremented above.
            m_out.appendTo(slowCase, continuation);
            results.append(m_out.anchor(vmCall(
                Int64, operationArrayPopAndRecoverLength, weakPointer(globalObject), base)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, results));
            return;
        }

        case Array::ArrayStorage: {
            LBasicBlock vectorLengthCheckCase = m_out.newBlock();
            LBasicBlock popCheckCase = m_out.newBlock();
            LBasicBlock fastCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue prevLength = m_out.load32(storage, m_heaps.ArrayStorage_publicLength);

            // Popping an empty array yields undefined.
            Vector<ValueFromBlock, 3> results;
            results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
            m_out.branch(
                m_out.isZero32(prevLength), rarely(continuation), usually(vectorLengthCheckCase));

            // The last element is only directly addressable when newLength is
            // within the vector; otherwise go to the runtime.
            LBasicBlock lastNext = m_out.appendTo(vectorLengthCheckCase, popCheckCase);
            LValue newLength = m_out.sub(prevLength, m_out.int32One);
            m_out.branch(
                m_out.aboveOrEqual(newLength, m_out.load32(storage, m_heaps.ArrayStorage_vectorLength)), rarely(slowCase), usually(popCheckCase));

            // A zero (empty) value in the slot is a hole: slow path.
            m_out.appendTo(popCheckCase, fastCase);
            TypedPointer pointer = m_out.baseIndex(m_heaps.ArrayStorage_vector, storage, m_out.zeroExtPtr(newLength));
            LValue result = m_out.load64(pointer);
            // We have to keep base alive to keep content in storage alive.
            ensureStillAliveHere(base);
            m_out.branch(m_out.notZero64(result), usually(fastCase), rarely(slowCase));

            // Fast pop: shrink publicLength, clear the slot, and decrement
            // numValuesInVector to match.
            m_out.appendTo(fastCase, slowCase);
            m_out.store32(newLength, storage, m_heaps.ArrayStorage_publicLength);
            m_out.store64(m_out.int64Zero, pointer);
            m_out.store32(
                m_out.sub(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.int32One),
                storage, m_heaps.ArrayStorage_numValuesInVector);
            results.append(m_out.anchor(result));
            m_out.jump(continuation);

            m_out.appendTo(slowCase, continuation);
            results.append(m_out.anchor(vmCall(Int64, operationArrayPop, weakPointer(globalObject), base)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, results));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            return;
        }
    }
6653
6654 void compilePushWithScope()
6655 {
6656 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
6657 LValue parentScope = lowCell(m_node->child1());
6658 auto objectEdge = m_node->child2();
6659 if (objectEdge.useKind() == ObjectUse) {
6660 LValue object = lowNonNullObject(objectEdge);
6661 LValue result = vmCall(Int64, operationPushWithScopeObject, weakPointer(globalObject), parentScope, object);
6662 setJSValue(result);
6663 } else {
6664 ASSERT(objectEdge.useKind() == UntypedUse);
6665 LValue object = lowJSValue(m_node->child2());
6666 LValue result = vmCall(Int64, operationPushWithScope, weakPointer(globalObject), parentScope, object);
6667 setJSValue(result);
6668 }
6669 }
6670
    void compileCreateActivation()
    {
        // Lowers CreateActivation: produces a JSLexicalEnvironment for |scope|
        // with the node's symbol table, initializing every variable slot with
        // the node's initialization value (undefined or the TDZ value).
        LValue scope = lowCell(m_node->child1());
        SymbolTable* table = m_node->castOperand<SymbolTable*>();
        RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_origin.semantic)->activationStructure());
        JSValue initializationValue = m_node->initializationValueForActivation();
        ASSERT(initializationValue.isUndefined() || initializationValue == jsTDZValue());
        // While the symbol table's singleton is still valid we never allocate
        // inline and always call the runtime instead.
        if (table->singleton().isStillValid()) {
            LValue callResult = vmCall(
                Int64,
                operationCreateActivationDirect, m_vmValue, weakStructure(structure),
                scope, weakPointer(table), m_out.constInt64(JSValue::encode(initializationValue)));
            setJSValue(callResult);
            return;
        }

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        // Inline allocation; jumps to slowPath when the allocator fails.
        LValue fastObject = allocateObject<JSLexicalEnvironment>(
            JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath);

        // We don't need memory barriers since we just fast-created the activation, so the
        // activation must be young.
        m_out.storePtr(scope, fastObject, m_heaps.JSScope_next);
        m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable);

        // Fill every variable slot with the initialization value.
        for (unsigned i = 0; i < table->scopeSize(); ++i) {
            m_out.store64(
                m_out.constInt64(JSValue::encode(initializationValue)),
                fastObject, m_heaps.JSLexicalEnvironment_variables[i]);
        }

        mutatorFence();

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        // Slow path: perform the same creation through the runtime.
        m_out.appendTo(slowPath, continuation);
        VM& vm = this->vm();
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationCreateActivationDirect, locations[0].directGPR(), &vm,
                    CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                    CCallHelpers::TrustedImmPtr(table),
                    CCallHelpers::TrustedImm64(JSValue::encode(initializationValue)));
            },
            scope);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
6728
    void compileNewFunction()
    {
        // Lowers NewFunction and its generator/async/async-generator variants:
        // allocates the function object inline when possible, otherwise calls
        // the runtime.
        ASSERT(m_node->op() == NewFunction || m_node->op() == NewGeneratorFunction || m_node->op() == NewAsyncGeneratorFunction || m_node->op() == NewAsyncFunction);
        bool isGeneratorFunction = m_node->op() == NewGeneratorFunction;
        bool isAsyncFunction = m_node->op() == NewAsyncFunction;
        bool isAsyncGeneratorFunction = m_node->op() == NewAsyncGeneratorFunction;

        LValue scope = lowCell(m_node->child1());

        FunctionExecutable* executable = m_node->castOperand<FunctionExecutable*>();
        // While the executable's singleton is still valid we never allocate
        // inline and always create the function through the runtime.
        if (executable->singleton().isStillValid()) {
            LValue callResult =
                isGeneratorFunction ? vmCall(Int64, operationNewGeneratorFunction, m_vmValue, scope, weakPointer(executable)) :
                isAsyncFunction ? vmCall(Int64, operationNewAsyncFunction, m_vmValue, scope, weakPointer(executable)) :
                isAsyncGeneratorFunction ? vmCall(Int64, operationNewAsyncGeneratorFunction, m_vmValue, scope, weakPointer(executable)) :
                vmCall(Int64, operationNewFunction, m_vmValue, scope, weakPointer(executable));
            setJSValue(callResult);
            return;
        }

        // Pick the structure that matches the function flavor.
        RegisteredStructure structure = m_graph.registerStructure(
            [&] () {
                JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
                switch (m_node->op()) {
                case NewGeneratorFunction:
                    return globalObject->generatorFunctionStructure();
                case NewAsyncFunction:
                    return globalObject->asyncFunctionStructure();
                case NewAsyncGeneratorFunction:
                    return globalObject->asyncGeneratorFunctionStructure();
                case NewFunction:
                    return JSFunction::selectStructureForNewFuncExp(globalObject, m_node->castOperand<FunctionExecutable*>());
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                }
            }());

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        // Inline allocation; jumps to slowPath when the allocator fails.
        LValue fastObject =
            isGeneratorFunction ? allocateObject<JSGeneratorFunction>(structure, m_out.intPtrZero, slowPath) :
            isAsyncFunction ? allocateObject<JSAsyncFunction>(structure, m_out.intPtrZero, slowPath) :
            isAsyncGeneratorFunction ? allocateObject<JSAsyncGeneratorFunction>(structure, m_out.intPtrZero, slowPath) :
            allocateObject<JSFunction>(structure, m_out.intPtrZero, slowPath);


        // We don't need memory barriers since we just fast-created the function, so it
        // must be young.
        m_out.storePtr(scope, fastObject, m_heaps.JSFunction_scope);
        m_out.storePtr(weakPointer(executable), fastObject, m_heaps.JSFunction_executableOrRareData);
        mutatorFence();

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        Vector<LValue> slowPathArguments;
        slowPathArguments.append(scope);
        VM& vm = this->vm();
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                // Use the "WithInvalidatedReallocationWatchpoint" variants:
                // we only reach this code after the singleton was found to be
                // invalid above.
                auto* operation = operationNewFunctionWithInvalidatedReallocationWatchpoint;
                if (isGeneratorFunction)
                    operation = operationNewGeneratorFunctionWithInvalidatedReallocationWatchpoint;
                else if (isAsyncFunction)
                    operation = operationNewAsyncFunctionWithInvalidatedReallocationWatchpoint;
                else if (isAsyncGeneratorFunction)
                    operation = operationNewAsyncGeneratorFunctionWithInvalidatedReallocationWatchpoint;

                return createLazyCallGenerator(vm, operation,
                    locations[0].directGPR(), &vm, locations[1].directGPR(),
                    CCallHelpers::TrustedImmPtr(executable));
            },
            slowPathArguments);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
6813
    void compileCreateDirectArguments()
    {
        // Lowers CreateDirectArguments: allocates a DirectArguments object and
        // copies the caller-passed arguments into its storage.
        // FIXME: A more effective way of dealing with the argument count and callee is to have
        // them be explicit arguments to this node.
        // https://bugs.webkit.org/show_bug.cgi?id=142207

        RegisteredStructure structure =
            m_graph.registerStructure(m_graph.globalObjectFor(m_origin.semantic)->directArgumentsStructure());

        // Minimum number of storage slots regardless of how many arguments
        // were actually passed. NOTE(review): presumably numParameters counts
        // |this|, hence the - 1 — confirm against DirectArguments.
        unsigned minCapacity = m_graph.baselineCodeBlockFor(m_origin.semantic)->numParameters() - 1;

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        ArgumentsLength length = getArgumentsLength();

        // Inline allocation: fixed size when the length is statically known;
        // otherwise compute the size from the dynamic length (8 bytes per
        // slot past storageOffset, clamped up to minCapacity's size). Jumps
        // to slowPath when the allocator fails.
        LValue fastObject;
        if (length.isKnown) {
            fastObject = allocateObject<DirectArguments>(
                DirectArguments::allocationSize(std::max(length.known, minCapacity)), structure,
                m_out.intPtrZero, slowPath);
        } else {
            LValue size = m_out.add(
                m_out.shl(length.value, m_out.constInt32(3)),
                m_out.constInt32(DirectArguments::storageOffset()));

            size = m_out.select(
                m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)),
                size, m_out.constInt32(DirectArguments::allocationSize(minCapacity)));

            fastObject = allocateVariableSizedObject<DirectArguments>(
                m_out.zeroExtPtr(size), structure, m_out.intPtrZero, slowPath);
        }

        // Initialize the header fields of the fast-allocated object.
        m_out.store32(length.value, fastObject, m_heaps.DirectArguments_length);
        m_out.store32(m_out.constInt32(minCapacity), fastObject, m_heaps.DirectArguments_minCapacity);
        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_mappedArguments);
        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_modifiedArgumentsDescriptor);

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        // Slow path: let the runtime allocate and initialize the header.
        m_out.appendTo(slowPath, continuation);
        VM& vm = this->vm();
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationCreateDirectArguments, locations[0].directGPR(), &vm,
                    CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                    CCallHelpers::TrustedImm32(minCapacity));
            }, length.value);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(pointerType(), fastResult, slowResult);

        // On both paths the callee and the argument values still need to be
        // stored.
        m_out.storePtr(getCurrentCallee(), result, m_heaps.DirectArguments_callee);

        if (length.isKnown) {
            // Known length: emit unrolled copies from the stack argument area.
            VirtualRegister start = AssemblyHelpers::argumentsStart(m_origin.semantic);
            for (unsigned i = 0; i < std::max(length.known, minCapacity); ++i) {
                m_out.store64(
                    m_out.load64(addressFor(start + i)),
                    result, m_heaps.DirectArguments_storage[i]);
            }
        } else {
            // Dynamic length: copy in a loop counting down to zero from
            // max(length, minCapacity).
            LValue stackBase = getArgumentsStart();

            LBasicBlock loop = m_out.newBlock();
            LBasicBlock end = m_out.newBlock();

            ValueFromBlock originalLength;
            if (minCapacity) {
                // minCapacity > 0 guarantees at least one iteration, so the
                // loop can be entered unconditionally.
                LValue capacity = m_out.select(
                    m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)),
                    length.value,
                    m_out.constInt32(minCapacity));
                LValue originalLengthValue = m_out.zeroExtPtr(capacity);
                originalLength = m_out.anchor(originalLengthValue);
                m_out.jump(loop);
            } else {
                // The count may be zero; skip the loop in that case.
                LValue originalLengthValue = m_out.zeroExtPtr(length.value);
                originalLength = m_out.anchor(originalLengthValue);
                m_out.branch(m_out.isNull(originalLengthValue), unsure(end), unsure(loop));
            }

            lastNext = m_out.appendTo(loop, end);
            LValue previousIndex = m_out.phi(pointerType(), originalLength);
            LValue index = m_out.sub(previousIndex, m_out.intPtrOne);
            m_out.store64(
                m_out.load64(m_out.baseIndex(m_heaps.variables, stackBase, index)),
                m_out.baseIndex(m_heaps.DirectArguments_storage, result, index));
            ValueFromBlock nextIndex = m_out.anchor(index);
            m_out.addIncomingToPhi(previousIndex, nextIndex);
            m_out.branch(m_out.isNull(index), unsure(end), unsure(loop));

            m_out.appendTo(end, lastNext);
        }

        mutatorFence();

        setJSValue(result);
    }
6920
6921 void compileCreateScopedArguments()
6922 {
6923 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
6924 LValue scope = lowCell(m_node->child1());
6925
6926 LValue result = vmCall(
6927 Int64, operationCreateScopedArguments, weakPointer(globalObject),
6928 weakPointer(
6929 m_graph.globalObjectFor(m_origin.semantic)->scopedArgumentsStructure()),
6930 getArgumentsStart(), getArgumentsLength().value, getCurrentCallee(), scope);
6931
6932 setJSValue(result);
6933 }
6934
6935 void compileCreateClonedArguments()
6936 {
6937 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
6938 LValue result = vmCall(
6939 Int64, operationCreateClonedArguments, weakPointer(globalObject),
6940 weakPointer(
6941 m_graph.globalObjectFor(m_origin.semantic)->clonedArgumentsStructure()),
6942 getArgumentsStart(), getArgumentsLength().value, getCurrentCallee());
6943
6944 setJSValue(result);
6945 }
6946
6947 void compileCreateArgumentsButterfly()
6948 {
6949 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
6950 LValue result = vmCall(Int64, operationCreateArgumentsButterfly, weakPointer(globalObject), getArgumentsStart(), getArgumentsLength().value);
6951 setJSValue(result);
6952 }
6953
    void compileCreateRest()
    {
        // Lowers CreateRest: materializes the rest-parameter array. With the
        // having-a-bad-time watchpoint intact we allocate a contiguous array
        // inline and copy the arguments in a loop; otherwise we call the
        // runtime.
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
            LBasicBlock continuation = m_out.newBlock();
            LValue arrayLength = lowInt32(m_node->child1());
            LBasicBlock loopStart = m_out.newBlock();
            RegisteredStructure structure = m_graph.registerStructure(globalObject->originalRestParameterStructure());
            ArrayValues arrayValues = allocateUninitializedContiguousJSArray(arrayLength, structure);
            LValue array = arrayValues.array;
            LValue butterfly = arrayValues.butterfly;
            ValueFromBlock startLength = m_out.anchor(arrayLength);
            // Skip past the non-rest arguments; only the tail is copied.
            LValue argumentRegion = m_out.add(getArgumentsStart(), m_out.constInt64(sizeof(Register) * m_node->numberOfArgumentsToSkip()));
            // An empty rest array needs no copying.
            m_out.branch(m_out.equal(arrayLength, m_out.constInt32(0)),
                unsure(continuation), unsure(loopStart));

            // Copy loop, counting down from arrayLength - 1 to 0.
            LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);
            LValue phiOffset = m_out.phi(Int32, startLength);
            LValue currentOffset = m_out.sub(phiOffset, m_out.int32One);
            m_out.addIncomingToPhi(phiOffset, m_out.anchor(currentOffset));
            LValue loadedValue = m_out.load64(m_out.baseIndex(m_heaps.variables, argumentRegion, m_out.zeroExtPtr(currentOffset)));
            IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
            m_out.store64(loadedValue, m_out.baseIndex(heap, butterfly, m_out.zeroExtPtr(currentOffset)));
            m_out.branch(m_out.equal(currentOffset, m_out.constInt32(0)), unsure(continuation), unsure(loopStart));

            m_out.appendTo(continuation, lastNext);
            // Fence after all initializing stores, before the array escapes.
            mutatorFence();
            setJSValue(array);
            return;
        }

        // Slow path: the runtime builds the rest array.
        LValue arrayLength = lowInt32(m_node->child1());
        LValue argumentStart = getArgumentsStart();
        LValue numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip());
        setJSValue(vmCall(
            Int64, operationCreateRest, weakPointer(globalObject), argumentStart, numberOfArgumentsToSkip, arrayLength));
    }
6991
6992 void compileGetRestLength()
6993 {
6994 LBasicBlock nonZeroLength = m_out.newBlock();
6995 LBasicBlock continuation = m_out.newBlock();
6996
6997 ValueFromBlock zeroLengthResult = m_out.anchor(m_out.constInt32(0));
6998
6999 LValue numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip());
7000 LValue argumentsLength = getArgumentsLength().value;
7001 m_out.branch(m_out.above(argumentsLength, numberOfArgumentsToSkip),
7002 unsure(nonZeroLength), unsure(continuation));
7003
7004 LBasicBlock lastNext = m_out.appendTo(nonZeroLength, continuation);
7005 ValueFromBlock nonZeroLengthResult = m_out.anchor(m_out.sub(argumentsLength, numberOfArgumentsToSkip));
7006 m_out.jump(continuation);
7007
7008 m_out.appendTo(continuation, lastNext);
7009 setInt32(m_out.phi(Int32, zeroLengthResult, nonZeroLengthResult));
7010 }
7011
    // Lowers ObjectKeys / ObjectGetOwnPropertyNames. For ObjectUse while the
    // having-a-bad-time watchpoint is intact, tries to reuse the property-name
    // JSImmutableButterfly cached on the object's StructureRareData and wrap it
    // in a copy-on-write JSArray; otherwise falls back to the runtime operation.
    void compileObjectKeysOrObjectGetOwnPropertyNames()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        NodeType op = m_node->op();
        switch (m_node->child1().useKind()) {
        case ObjectUse: {
            if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
                LBasicBlock notNullCase = m_out.newBlock();
                LBasicBlock rareDataCase = m_out.newBlock();
                LBasicBlock useCacheCase = m_out.newBlock();
                LBasicBlock slowButArrayBufferCase = m_out.newBlock();
                LBasicBlock slowCase = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                // Structure::previousOrRareData is a union: null, a previous
                // Structure, or a StructureRareData. Null means no cache.
                LValue object = lowObject(m_node->child1());
                LValue structure = loadStructure(object);
                LValue previousOrRareData = m_out.loadPtr(structure, m_heaps.Structure_previousOrRareData);
                m_out.branch(m_out.notNull(previousOrRareData), unsure(notNullCase), unsure(slowCase));

                // Distinguish rare data from a previous Structure: a Structure's
                // own structureID is the singleton structure-Structure's ID.
                LBasicBlock lastNext = m_out.appendTo(notNullCase, rareDataCase);
                m_out.branch(
                    m_out.notEqual(m_out.load32(previousOrRareData, m_heaps.JSCell_structureID), m_out.constInt32(m_graph.m_vm.structureStructure->structureID())),
                    unsure(rareDataCase), unsure(slowCase));

                m_out.appendTo(rareDataCase, useCacheCase);
                // The cached slot holds nullptr (0), the sentinel (1), or a real
                // JSImmutableButterfly*. belowOrEqual(sentinel) rejects both
                // "no cache yet" states in one comparison.
                ASSERT(bitwise_cast<uintptr_t>(StructureRareData::cachedPropertyNamesSentinel()) == 1);
                LValue cached = m_out.loadPtr(previousOrRareData, op == ObjectKeys ? m_heaps.StructureRareData_cachedKeys : m_heaps.StructureRareData_cachedGetOwnPropertyNames);
                m_out.branch(m_out.belowOrEqual(cached, m_out.constIntPtr(bitwise_cast<void*>(StructureRareData::cachedPropertyNamesSentinel()))), unsure(slowCase), unsure(useCacheCase));

                // Fast path: inline-allocate a CoW array pointing at the cached
                // immutable butterfly's data.
                m_out.appendTo(useCacheCase, slowButArrayBufferCase);
                RegisteredStructure arrayStructure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(CopyOnWriteArrayWithContiguous));
                LValue fastArray = allocateObject<JSArray>(arrayStructure, m_out.addPtr(cached, JSImmutableButterfly::offsetOfData()), slowButArrayBufferCase);
                ValueFromBlock fastResult = m_out.anchor(fastArray);
                m_out.jump(continuation);

                // Inline allocation failed but the cached butterfly is usable:
                // let the runtime allocate the array around it.
                m_out.appendTo(slowButArrayBufferCase, slowCase);
                LValue slowArray = vmCall(Int64, operationNewArrayBuffer, m_vmValue, weakStructure(arrayStructure), cached);
                ValueFromBlock slowButArrayBufferResult = m_out.anchor(slowArray);
                m_out.jump(continuation);

                // Fully slow path: compute the property names from scratch via a
                // lazily generated call.
                m_out.appendTo(slowCase, continuation);
                VM& vm = this->vm();
                LValue slowResultValue = lazySlowPath(
                    [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                        return createLazyCallGenerator(vm,
                            op == ObjectKeys ? operationObjectKeysObject : operationObjectGetOwnPropertyNamesObject, locations[0].directGPR(), globalObject, locations[1].directGPR());
                    },
                    object);
                ValueFromBlock slowResult = m_out.anchor(slowResultValue);
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setJSValue(m_out.phi(pointerType(), fastResult, slowButArrayBufferResult, slowResult));
                break;
            }
            // Not watching the watchpoint: always call the runtime operation.
            setJSValue(vmCall(Int64, op == ObjectKeys ? operationObjectKeysObject : operationObjectGetOwnPropertyNamesObject, weakPointer(globalObject), lowObject(m_node->child1())));
            break;
        }
        case UntypedUse:
            setJSValue(vmCall(Int64, op == ObjectKeys ? operationObjectKeys : operationObjectGetOwnPropertyNames, weakPointer(globalObject), lowJSValue(m_node->child1())));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
7078
7079 void compileObjectCreate()
7080 {
7081 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
7082 switch (m_node->child1().useKind()) {
7083 case ObjectUse:
7084 setJSValue(vmCall(Int64, operationObjectCreateObject, weakPointer(globalObject), lowObject(m_node->child1())));
7085 break;
7086 case UntypedUse:
7087 setJSValue(vmCall(Int64, operationObjectCreate, weakPointer(globalObject), lowJSValue(m_node->child1())));
7088 break;
7089 default:
7090 RELEASE_ASSERT_NOT_REACHED();
7091 break;
7092 }
7093 }
7094
7095 void compileNewObject()
7096 {
7097 setJSValue(allocateObject(m_node->structure()));
7098 mutatorFence();
7099 }
7100
    // Shared lowering for nodes that allocate a JSInternalFieldObjectImpl
    // subclass (generators, async generators, iterators, ...). Fast path:
    // inline allocation plus stores of the class's compile-time initial field
    // values, followed by a mutator fence. Slow path: call the supplied
    // runtime operation with the node's (frozen) structure.
    template<typename JSClass, typename Operation>
    void compileNewInternalFieldObjectImpl(Operation operation)
    {
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowCase);

        // Inline allocation with a null butterfly; branches to slowCase if the
        // allocator cannot satisfy the request.
        LValue object = allocateObject<JSClass>(m_node->structure(), m_out.intPtrZero, slowCase);
        auto initialValues = JSClass::initialValues();
        static_assert(initialValues.size() == JSClass::numberOfInternalFields);
        for (unsigned index = 0; index < initialValues.size(); ++index)
            m_out.store64(m_out.constInt64(JSValue::encode(initialValues[index])), object, m_heaps.JSInternalFieldObjectImpl_internalFields[index]);
        mutatorFence();
        ValueFromBlock fastResult = m_out.anchor(object);
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(vmCall(pointerType(), operation, m_vmValue, frozenPointer(m_graph.freezeStrong(m_node->structure().get()))));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
7125
    // Lowers NewGenerator via the shared internal-field-object path.
    void compileNewGenerator()
    {
        compileNewInternalFieldObjectImpl<JSGenerator>(operationNewGenerator);
    }
7130
    // Lowers NewAsyncGenerator via the shared internal-field-object path.
    void compileNewAsyncGenerator()
    {
        compileNewInternalFieldObjectImpl<JSAsyncGenerator>(operationNewAsyncGenerator);
    }
7135
    // Lowers NewInternalFieldObject by dispatching on the JSType recorded in
    // the node's structure; each case delegates to the shared
    // compileNewInternalFieldObjectImpl with the matching class and operation.
    void compileNewInternalFieldObject()
    {
        switch (m_node->structure()->typeInfo().type()) {
        case JSArrayIteratorType:
            compileNewInternalFieldObjectImpl<JSArrayIterator>(operationNewArrayIterator);
            break;
        case JSMapIteratorType:
            compileNewInternalFieldObjectImpl<JSMapIterator>(operationNewMapIterator);
            break;
        case JSSetIteratorType:
            compileNewInternalFieldObjectImpl<JSSetIterator>(operationNewSetIterator);
            break;
        case JSPromiseType:
            // JSPromise and JSInternalPromise share a JSType; disambiguate via
            // the structure's ClassInfo.
            if (m_node->structure()->classInfo() == JSInternalPromise::info())
                compileNewInternalFieldObjectImpl<JSInternalPromise>(operationNewInternalPromise);
            else {
                ASSERT(m_node->structure()->classInfo() == JSPromise::info());
                compileNewInternalFieldObjectImpl<JSPromise>(operationNewPromise);
            }
            break;
        default:
            DFG_CRASH(m_graph, m_node, "Bad structure");
        }
    }
7160
    // Lowers NewStringObject. Fast path: inline-allocate the StringObject and
    // store the wrapped string into its internal-value slot. Slow path: a
    // lazily generated call to operationNewStringObject.
    void compileNewStringObject()
    {
        // FIXME: We should handle this as JSInternalFieldObject allocation.
        // https://bugs.webkit.org/show_bug.cgi?id=209453
        RegisteredStructure structure = m_node->structure();
        LValue string = lowString(m_node->child1());

        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowCase);

        // Null butterfly; branches to slowCase on allocation failure.
        LValue fastResultValue = allocateObject<StringObject>(structure, m_out.intPtrZero, slowCase);
        m_out.store64(string, fastResultValue, m_heaps.JSWrapperObject_internalValue);
        mutatorFence();
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        VM& vm = this->vm();
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationNewStringObject, locations[0].directGPR(), &vm, locations[1].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()));
            },
            string);
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
7194
7195 void compileNewSymbol()
7196 {
7197 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
7198 if (!m_node->child1()) {
7199 setJSValue(vmCall(pointerType(), operationNewSymbol, m_vmValue));
7200 return;
7201 }
7202 ASSERT(m_node->child1().useKind() == KnownStringUse);
7203 setJSValue(vmCall(pointerType(), operationNewSymbolWithDescription, weakPointer(globalObject), lowString(m_node->child1())));
7204 }
7205
    // Lowers NewArray. Fast path (not having-a-bad-time, no array storage):
    // inline-allocate an uninitialized contiguous JSArray and store each child
    // directly. Otherwise: either operationNewEmptyArray for zero children, or
    // spill children to a scratch buffer and call operationNewArray.
    void compileNewArray()
    {
        // First speculate appropriately on all of the children. Do this unconditionally up here
        // because some of the slow paths may otherwise forget to do it. It's sort of arguable
        // that doing the speculations up here might be unprofitable for RA - so we can consider
        // sinking this to below the allocation fast path if we find that this has a lot of
        // register pressure.
        // Because we first speculate on all of the children here, we can never exit after creating
        // uninitialized contiguous JSArray, which ensures that we will never produce a half-baked JSArray.
        for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex)
            speculate(m_graph.varArgChild(m_node, operandIndex));

        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
            m_node->indexingType()));

        if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
            unsigned numElements = m_node->numChildren();
            unsigned vectorLengthHint = m_node->vectorLengthHint();
            ASSERT(vectorLengthHint >= numElements);

            ArrayValues arrayValues =
                allocateUninitializedContiguousJSArray(numElements, vectorLengthHint, structure);

            // Store each child into the freshly allocated butterfly, using the
            // representation dictated by the array's indexing type.
            for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) {
                Edge edge = m_graph.varArgChild(m_node, operandIndex);

                switch (m_node->indexingType()) {
                case ALL_BLANK_INDEXING_TYPES:
                case ALL_UNDECIDED_INDEXING_TYPES:
                    DFG_CRASH(m_graph, m_node, "Bad indexing type");
                    break;

                case ALL_DOUBLE_INDEXING_TYPES:
                    m_out.storeDouble(
                        lowDouble(edge),
                        arrayValues.butterfly, m_heaps.indexedDoubleProperties[operandIndex]);
                    break;

                case ALL_INT32_INDEXING_TYPES:
                case ALL_CONTIGUOUS_INDEXING_TYPES:
                    m_out.store64(
                        lowJSValue(edge, ManualOperandSpeculation),
                        arrayValues.butterfly,
                        m_heaps.forIndexingType(m_node->indexingType())->at(operandIndex));
                    break;

                default:
                    DFG_CRASH(m_graph, m_node, "Corrupt indexing type");
                    break;
                }
            }

            setJSValue(arrayValues.array);
            mutatorFence();
            return;
        }

        if (!m_node->numChildren()) {
            setJSValue(vmCall(
                Int64, operationNewEmptyArray, m_vmValue,
                weakStructure(structure)));
            return;
        }

        // Slow path: spill all children into the VM scratch buffer, then hand
        // the buffer to the runtime.
        size_t scratchSize = sizeof(EncodedJSValue) * m_node->numChildren();
        ASSERT(scratchSize);
        ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());

        for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) {
            Edge edge = m_graph.varArgChild(m_node, operandIndex);
            LValue valueToStore;
            switch (m_node->indexingType()) {
            case ALL_DOUBLE_INDEXING_TYPES:
                // Double arrays keep unboxed doubles in DFG values; box before
                // writing to the JSValue-typed buffer.
                valueToStore = boxDouble(lowDouble(edge));
                break;
            default:
                valueToStore = lowJSValue(edge, ManualOperandSpeculation);
                break;
            }
            m_out.store64(valueToStore, m_out.absolute(buffer + operandIndex));
        }

        LValue result = vmCall(
            Int64, operationNewArray, weakPointer(globalObject),
            weakStructure(structure), m_out.constIntPtr(buffer),
            m_out.constIntPtr(m_node->numChildren()));

        setJSValue(result);
    }
7297
    // Lowers NewArrayWithSpread. Under the having-a-bad-time watchpoint this
    // computes the final length (constant part plus spread lengths), allocates
    // one uninitialized contiguous array, and copies every child/spread into
    // it with emitted loops. Otherwise it spills children to a scratch buffer
    // and calls operationNewArrayWithSpreadSlow.
    void compileNewArrayWithSpread()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
            CheckedInt32 startLength = 0;
            BitVector* bitVector = m_node->bitVector();
            // Cache each inline call frame's spread length so it is computed
            // once and reused by both the length pass and the copy pass.
            HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;

            // Special case: a single spread child can sometimes produce a
            // copy-on-write array sharing the source butterfly, with no copy.
            if (m_node->numChildren() == 1 && bitVector->get(0)) {
                Edge use = m_graph.varArgChild(m_node, 0);
                if (use->op() == PhantomSpread) {
                    if (use->child1()->op() == PhantomNewArrayBuffer) {
                        auto* immutableButterfly = use->child1()->castOperand<JSImmutableButterfly*>();
                        if (hasContiguous(immutableButterfly->indexingType())) {
                            RegisteredStructure structure = m_graph.registerStructure(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous));
                            LBasicBlock slowPath = m_out.newBlock();
                            LBasicBlock continuation = m_out.newBlock();

                            LValue fastArray = allocateObject<JSArray>(structure, m_out.constIntPtr(immutableButterfly->toButterfly()), slowPath);
                            ValueFromBlock fastResult = m_out.anchor(fastArray);
                            m_out.jump(continuation);

                            m_out.appendTo(slowPath, continuation);
                            LValue slowArray = vmCall(Int64, operationNewArrayBuffer, m_vmValue, weakStructure(structure), frozenPointer(use->child1()->cellOperand()));
                            ValueFromBlock slowResult = m_out.anchor(slowArray);
                            m_out.jump(continuation);

                            m_out.appendTo(continuation);

                            mutatorFence();
                            setJSValue(m_out.phi(pointerType(), slowResult, fastResult));
                            return;
                        }
                    }
                } else {
                    // If a node is producing JSImmutableButterfly, it must be contiguous.
                    LValue immutableButterfly = lowCell(use);

                    RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(CopyOnWriteArrayWithContiguous));
                    LBasicBlock slowPath = m_out.newBlock();
                    LBasicBlock continuation = m_out.newBlock();

                    LValue fastArray = allocateObject<JSArray>(structure, toButterfly(immutableButterfly), slowPath);
                    ValueFromBlock fastResult = m_out.anchor(fastArray);
                    m_out.jump(continuation);

                    m_out.appendTo(slowPath, continuation);
                    LValue slowArray = vmCall(Int64, operationNewArrayBuffer, m_vmValue, weakStructure(structure), immutableButterfly);
                    ValueFromBlock slowResult = m_out.anchor(slowArray);
                    m_out.jump(continuation);

                    m_out.appendTo(continuation);

                    mutatorFence();
                    setJSValue(m_out.phi(pointerType(), slowResult, fastResult));
                    return;
                }
            }

            // Compile-time part of the length: one element per non-spread
            // child, plus the known lengths of PhantomNewArrayBuffer spreads.
            for (unsigned i = 0; i < m_node->numChildren(); ++i) {
                if (!bitVector->get(i))
                    ++startLength;
                else {
                    Edge& child = m_graph.varArgChild(m_node, i);
                    if (child->op() == PhantomSpread && child->child1()->op() == PhantomNewArrayBuffer)
                        startLength += child->child1()->castOperand<JSImmutableButterfly*>()->length();
                }
            }

            if (startLength.hasOverflowed()) {
                terminate(Overflow);
                return;
            }

            LValue length = m_out.constInt32(startLength);

            // Runtime part of the length: add each dynamic spread length with
            // overflow-checked additions.
            for (unsigned i = 0; i < m_node->numChildren(); ++i) {
                if (bitVector->get(i)) {
                    Edge use = m_graph.varArgChild(m_node, i);
                    CheckValue* lengthCheck = nullptr;
                    if (use->op() == PhantomSpread) {
                        if (use->child1()->op() == PhantomCreateRest) {
                            InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame();
                            unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();
                            LValue spreadLength = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                                return getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
                            }).iterator->value;
                            lengthCheck = m_out.speculateAdd(length, spreadLength);
                        }
                    } else {
                        LValue immutableButterfly = lowCell(use);
                        lengthCheck = m_out.speculateAdd(length, m_out.load32(toButterfly(immutableButterfly), m_heaps.Butterfly_publicLength));
                    }

                    if (lengthCheck) {
                        blessSpeculation(lengthCheck, Overflow, noValue(), nullptr, m_origin);
                        length = lengthCheck;
                    }
                }
            }

            // A length at or above this threshold would force ArrayStorage;
            // bail out to the interpreter instead of allocating here.
            LValue exceedsMaxAllowedLength = m_out.aboveOrEqual(length, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
            blessSpeculation(m_out.speculate(exceedsMaxAllowedLength), Overflow, noValue(), nullptr, m_origin);

            RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_origin.semantic)->originalArrayStructureForIndexingType(ArrayWithContiguous));
            ArrayValues arrayValues = allocateUninitializedContiguousJSArray(length, structure);
            LValue result = arrayValues.array;
            LValue storage = arrayValues.butterfly;
            // Running write cursor into the destination butterfly.
            LValue index = m_out.constIntPtr(0);

            for (unsigned i = 0; i < m_node->numChildren(); ++i) {
                Edge use = m_graph.varArgChild(m_node, i);
                if (bitVector->get(i)) {
                    if (use->op() == PhantomSpread) {
                        if (use->child1()->op() == PhantomNewArrayBuffer) {
                            // Constant spread: unroll the stores.
                            IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
                            auto* array = use->child1()->castOperand<JSImmutableButterfly*>();
                            for (unsigned i = 0; i < array->length(); ++i) {
                                // Because resulted array from NewArrayWithSpread is always contiguous, we should not generate value
                                // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
                                int64_t value = JSValue::encode(array->get(i));
                                m_out.store64(m_out.constInt64(value), m_out.baseIndex(heap, storage, index, JSValue(), Checked<int32_t>(sizeof(JSValue)) * i));
                            }
                            index = m_out.add(index, m_out.constIntPtr(array->length()));
                        } else {
                            // Rest-argument spread: emit a copy loop over the
                            // caller's argument slots.
                            RELEASE_ASSERT(use->child1()->op() == PhantomCreateRest);
                            InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame();
                            unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();

                            LValue length = m_out.zeroExtPtr(cachedSpreadLengths.get(inlineCallFrame));
                            LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);

                            LBasicBlock loopStart = m_out.newBlock();
                            LBasicBlock continuation = m_out.newBlock();

                            ValueFromBlock loadIndexStart = m_out.anchor(m_out.constIntPtr(0));
                            ValueFromBlock arrayIndexStart = m_out.anchor(index);
                            ValueFromBlock arrayIndexStartForFinish = m_out.anchor(index);

                            // Skip the loop entirely for a zero-length spread.
                            m_out.branch(
                                m_out.isZero64(length),
                                unsure(continuation), unsure(loopStart));

                            LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);

                            LValue arrayIndex = m_out.phi(pointerType(), arrayIndexStart);
                            LValue loadIndex = m_out.phi(pointerType(), loadIndexStart);

                            LValue item = m_out.load64(m_out.baseIndex(m_heaps.variables, sourceStart, loadIndex));
                            m_out.store64(item, m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, arrayIndex));

                            LValue nextArrayIndex = m_out.add(arrayIndex, m_out.constIntPtr(1));
                            LValue nextLoadIndex = m_out.add(loadIndex, m_out.constIntPtr(1));
                            ValueFromBlock arrayIndexLoopForFinish = m_out.anchor(nextArrayIndex);

                            m_out.addIncomingToPhi(loadIndex, m_out.anchor(nextLoadIndex));
                            m_out.addIncomingToPhi(arrayIndex, m_out.anchor(nextArrayIndex));

                            m_out.branch(
                                m_out.below(nextLoadIndex, length),
                                unsure(loopStart), unsure(continuation));

                            m_out.appendTo(continuation, lastNext);
                            // New cursor: either unchanged (zero-length) or the
                            // loop's final array index.
                            index = m_out.phi(pointerType(), arrayIndexStartForFinish, arrayIndexLoopForFinish);
                        }
                    } else {
                        // Spread of a materialized JSImmutableButterfly: copy
                        // its contiguous contents with an emitted loop.
                        LBasicBlock loopStart = m_out.newBlock();
                        LBasicBlock continuation = m_out.newBlock();

                        LValue immutableButterfly = lowCell(use);
                        LValue immutableButterflyStorage = toButterfly(immutableButterfly);

                        ValueFromBlock immutableButterflyIndexStart = m_out.anchor(m_out.constIntPtr(0));
                        ValueFromBlock arrayIndexStart = m_out.anchor(index);
                        ValueFromBlock arrayIndexStartForFinish = m_out.anchor(index);

                        LValue immutableButterflySize = m_out.zeroExtPtr(m_out.load32(immutableButterflyStorage, m_heaps.Butterfly_publicLength));

                        m_out.branch(
                            m_out.isZero64(immutableButterflySize),
                            unsure(continuation), unsure(loopStart));

                        LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);

                        LValue arrayIndex = m_out.phi(pointerType(), arrayIndexStart);
                        LValue immutableButterflyIndex = m_out.phi(pointerType(), immutableButterflyIndexStart);

                        LValue item = m_out.load64(m_out.baseIndex(m_heaps.indexedContiguousProperties, immutableButterflyStorage, immutableButterflyIndex));
                        m_out.store64(item, m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, arrayIndex));

                        LValue nextArrayIndex = m_out.add(arrayIndex, m_out.constIntPtr(1));
                        LValue nextImmutableButterflyIndex = m_out.add(immutableButterflyIndex, m_out.constIntPtr(1));
                        ValueFromBlock arrayIndexLoopForFinish = m_out.anchor(nextArrayIndex);

                        m_out.addIncomingToPhi(immutableButterflyIndex, m_out.anchor(nextImmutableButterflyIndex));
                        m_out.addIncomingToPhi(arrayIndex, m_out.anchor(nextArrayIndex));

                        m_out.branch(
                            m_out.below(nextImmutableButterflyIndex, immutableButterflySize),
                            unsure(loopStart), unsure(continuation));

                        m_out.appendTo(continuation, lastNext);
                        index = m_out.phi(pointerType(), arrayIndexStartForFinish, arrayIndexLoopForFinish);
                    }
                } else {
                    // Plain (non-spread) child: one store, advance the cursor.
                    IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
                    LValue item = lowJSValue(use);
                    m_out.store64(item, m_out.baseIndex(heap, storage, index));
                    index = m_out.add(index, m_out.constIntPtr(1));
                }
            }

            mutatorFence();
            setJSValue(result);
            return;
        }

        // Having-a-bad-time fallback: spill children to a scratch buffer and
        // let the runtime assemble the array.
        ASSERT(m_node->numChildren());
        size_t scratchSize = sizeof(EncodedJSValue) * m_node->numChildren();
        ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
        BitVector* bitVector = m_node->bitVector();
        for (unsigned i = 0; i < m_node->numChildren(); ++i) {
            Edge use = m_graph.m_varArgChildren[m_node->firstChild() + i];
            LValue value;
            if (bitVector->get(i))
                value = lowCell(use);
            else
                value = lowJSValue(use);
            m_out.store64(value, m_out.absolute(&buffer[i]));
        }

        LValue result = vmCall(Int64, operationNewArrayWithSpreadSlow, weakPointer(globalObject), m_out.constIntPtr(buffer), m_out.constInt32(m_node->numChildren()));

        setJSValue(result);
    }
7534
    // Lowers CreateThis. Fast path: the callee is a JSFunction with rare data
    // carrying a cached allocator and structure, so the `this` object can be
    // inline-allocated. Slow path: operationCreateThis.
    void compileCreateThis()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        LValue callee = lowCell(m_node->child1());

        LBasicBlock isFunctionBlock = m_out.newBlock();
        LBasicBlock hasRareData = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isFunction(callee, provenType(m_node->child1())), usually(isFunctionBlock), rarely(slowPath));

        // JSFunction::executableOrRareData is tagged: the rareDataTag bit set
        // means it points at FunctionRareData; clear means it is an executable.
        LBasicBlock lastNext = m_out.appendTo(isFunctionBlock, hasRareData);
        LValue rareDataTags = m_out.loadPtr(callee, m_heaps.JSFunction_executableOrRareData);
        m_out.branch(m_out.testIsZeroPtr(rareDataTags, m_out.constIntPtr(JSFunction::rareDataTag)), rarely(slowPath), usually(hasRareData));

        m_out.appendTo(hasRareData, slowPath);
        // Strip the tag bit to recover the FunctionRareData pointer.
        LValue rareData = m_out.sub(rareDataTags, m_out.constIntPtr(JSFunction::rareDataTag));
        LValue allocator = m_out.loadPtr(rareData, m_heaps.FunctionRareData_allocator);
        LValue structure = m_out.loadPtr(rareData, m_heaps.FunctionRareData_structure);
        LValue butterfly = m_out.constIntPtr(0);
        ValueFromBlock fastResult = m_out.anchor(allocateObject(allocator, structure, butterfly, slowPath));
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        ValueFromBlock slowResult = m_out.anchor(vmCall(
            Int64, operationCreateThis, weakPointer(globalObject), callee, m_out.constInt32(m_node->inlineCapacity())));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(Int64, fastResult, slowResult);

        mutatorFence();
        setJSValue(result);
    }
7570
    // Lowers CreatePromise. Fast paths: (a) the callee is exactly the global
    // (internal) promise constructor, so the global promise structure is used
    // directly; (b) a derived callee is a JSFunction whose rare data caches a
    // structure with the right ClassInfo and global object. Either way the
    // promise is inline-allocated and its internal fields initialized.
    // Slow path: operationCreatePromise / operationCreateInternalPromise.
    void compileCreatePromise()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

        LValue callee = lowCell(m_node->child1());

        LBasicBlock derivedCase = m_out.newBlock();
        LBasicBlock isFunctionBlock = m_out.newBlock();
        LBasicBlock hasRareData = m_out.newBlock();
        LBasicBlock hasStructure = m_out.newBlock();
        LBasicBlock checkGlobalObjectCase = m_out.newBlock();
        LBasicBlock fastAllocationCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Callee is the well-known constructor: use the global structure.
        ValueFromBlock promiseStructure = m_out.anchor(weakStructure(m_graph.registerStructure(m_node->isInternalPromise() ? globalObject->internalPromiseStructure() : globalObject->promiseStructure())));
        m_out.branch(m_out.equal(callee, weakPointer(m_node->isInternalPromise() ? globalObject->internalPromiseConstructor() : globalObject->promiseConstructor())), unsure(fastAllocationCase), unsure(derivedCase));

        LBasicBlock lastNext = m_out.appendTo(derivedCase, isFunctionBlock);
        m_out.branch(isFunction(callee, provenType(m_node->child1())), usually(isFunctionBlock), rarely(slowCase));

        // executableOrRareData is tagged; the rareDataTag bit distinguishes
        // rare data from an executable pointer.
        m_out.appendTo(isFunctionBlock, hasRareData);
        LValue rareDataTags = m_out.loadPtr(callee, m_heaps.JSFunction_executableOrRareData);
        m_out.branch(m_out.testIsZeroPtr(rareDataTags, m_out.constIntPtr(JSFunction::rareDataTag)), rarely(slowCase), usually(hasRareData));

        m_out.appendTo(hasRareData, hasStructure);
        LValue rareData = m_out.sub(rareDataTags, m_out.constIntPtr(JSFunction::rareDataTag));
        LValue structure = m_out.loadPtr(rareData, m_heaps.FunctionRareData_internalFunctionAllocationProfile_structure);
        m_out.branch(m_out.isZero64(structure), rarely(slowCase), usually(hasStructure));

        // The cached structure must describe the right promise class...
        m_out.appendTo(hasStructure, checkGlobalObjectCase);
        m_out.branch(m_out.equal(m_out.loadPtr(structure, m_heaps.Structure_classInfo), m_out.constIntPtr(m_node->isInternalPromise() ? JSInternalPromise::info() : JSPromise::info())), usually(checkGlobalObjectCase), rarely(slowCase));

        // ...and belong to this code's global object.
        m_out.appendTo(checkGlobalObjectCase, fastAllocationCase);
        ValueFromBlock derivedStructure = m_out.anchor(structure);
        m_out.branch(m_out.equal(m_out.loadPtr(structure, m_heaps.Structure_globalObject), weakPointer(globalObject)), usually(fastAllocationCase), rarely(slowCase));

        // Inline allocation; the structure is a phi of the two fast entries.
        m_out.appendTo(fastAllocationCase, slowCase);
        LValue promise;
        if (m_node->isInternalPromise())
            promise = allocateObject<JSInternalPromise>(m_out.phi(pointerType(), promiseStructure, derivedStructure), m_out.intPtrZero, slowCase);
        else
            promise = allocateObject<JSPromise>(m_out.phi(pointerType(), promiseStructure, derivedStructure), m_out.intPtrZero, slowCase);
        // Initialize internal fields: status = Pending, reactions = undefined.
        m_out.store64(m_out.constInt64(JSValue::encode(jsNumber(static_cast<unsigned>(JSPromise::Status::Pending)))), promise, m_heaps.JSInternalFieldObjectImpl_internalFields[static_cast<unsigned>(JSPromise::Field::Flags)]);
        m_out.store64(m_out.constInt64(JSValue::encode(jsUndefined())), promise, m_heaps.JSInternalFieldObjectImpl_internalFields[static_cast<unsigned>(JSPromise::Field::ReactionsOrResult)]);
        mutatorFence();
        ValueFromBlock fastResult = m_out.anchor(promise);
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, m_node->isInternalPromise() ? operationCreateInternalPromise : operationCreatePromise, weakPointer(globalObject), callee));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(Int64, fastResult, slowResult);

        setJSValue(result);
    }
7629
    // Shared lowering for CreateGenerator/CreateAsyncGenerator. Fast path:
    // the callee is a JSFunction whose rare data caches an allocation-profile
    // structure with JSClass's ClassInfo and this global object; the object is
    // inline-allocated and its internal fields set to JSClass::initialValues().
    // Slow path: the supplied runtime operation.
    template<typename JSClass, typename Operation>
    void compileCreateInternalFieldObject(Operation operation)
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

        LValue callee = lowCell(m_node->child1());

        LBasicBlock isFunctionBlock = m_out.newBlock();
        LBasicBlock hasRareData = m_out.newBlock();
        LBasicBlock hasStructure = m_out.newBlock();
        LBasicBlock checkGlobalObjectCase = m_out.newBlock();
        LBasicBlock fastAllocationCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isFunction(callee, provenType(m_node->child1())), usually(isFunctionBlock), rarely(slowCase));

        // executableOrRareData is tagged; the rareDataTag bit set means rare
        // data is present.
        LBasicBlock lastNext = m_out.appendTo(isFunctionBlock, hasRareData);
        LValue rareDataTags = m_out.loadPtr(callee, m_heaps.JSFunction_executableOrRareData);
        m_out.branch(m_out.testIsZeroPtr(rareDataTags, m_out.constIntPtr(JSFunction::rareDataTag)), rarely(slowCase), usually(hasRareData));

        m_out.appendTo(hasRareData, hasStructure);
        LValue rareData = m_out.sub(rareDataTags, m_out.constIntPtr(JSFunction::rareDataTag));
        LValue structure = m_out.loadPtr(rareData, m_heaps.FunctionRareData_internalFunctionAllocationProfile_structure);
        m_out.branch(m_out.isZero64(structure), rarely(slowCase), usually(hasStructure));

        // The cached structure must be for JSClass...
        m_out.appendTo(hasStructure, checkGlobalObjectCase);
        m_out.branch(m_out.equal(m_out.loadPtr(structure, m_heaps.Structure_classInfo), m_out.constIntPtr(JSClass::info())), usually(checkGlobalObjectCase), rarely(slowCase));

        // ...and belong to this code's global object.
        m_out.appendTo(checkGlobalObjectCase, fastAllocationCase);
        m_out.branch(m_out.equal(m_out.loadPtr(structure, m_heaps.Structure_globalObject), weakPointer(globalObject)), usually(fastAllocationCase), rarely(slowCase));

        m_out.appendTo(fastAllocationCase, slowCase);
        LValue object = allocateObject<JSClass>(structure, m_out.intPtrZero, slowCase);
        auto initialValues = JSClass::initialValues();
        static_assert(initialValues.size() == JSClass::numberOfInternalFields, "We don't support non-constant fields in create yet.");
        for (unsigned index = 0; index < initialValues.size(); ++index)
            m_out.store64(m_out.constInt64(JSValue::encode(initialValues[index])), object, m_heaps.JSInternalFieldObjectImpl_internalFields[index]);
        mutatorFence();
        ValueFromBlock fastResult = m_out.anchor(object);
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, operation, weakPointer(globalObject), callee));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(Int64, fastResult, slowResult);

        setJSValue(result);
    }
7681
    // Lowers CreateGenerator via the shared create-internal-field-object path.
    void compileCreateGenerator()
    {
        compileCreateInternalFieldObject<JSGenerator>(operationCreateGenerator);
    }
7686
    // Lowers CreateAsyncGenerator via the shared create-internal-field-object path.
    void compileCreateAsyncGenerator()
    {
        compileCreateInternalFieldObject<JSAsyncGenerator>(operationCreateAsyncGenerator);
    }
7691
    // Lowers the Spread node: produces a JSImmutableButterfly containing the
    // spread-out elements of child1. Three strategies are used depending on how
    // child1 was produced:
    //   1. PhantomNewArrayBuffer — contents are a compile-time constant.
    //   2. PhantomCreateRest — contents are the (skipped-prefix) call arguments.
    //   3. Otherwise — a live array cell, shared or copied at runtime.
    void compileSpread()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        if (m_node->child1()->op() == PhantomNewArrayBuffer) {
            ASSERT(m_graph.isWatchingHavingABadTimeWatchpoint(m_node->child1().node()));

            // FIXME: JSImmutableButterfly::createFromArray should support re-using non contiguous indexing types as well.
            auto* immutableButterfly = m_node->child1()->castOperand<JSImmutableButterfly*>();
            if (hasContiguous(immutableButterfly->indexingType())) {
                // The constant butterfly is already contiguous and immutable, so
                // it can be used directly without any copying.
                setJSValue(frozenPointer(m_node->child1()->cellOperand()));
                return;
            }

            LBasicBlock slowAllocation = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            ASSERT(immutableButterfly->length() <= MAX_STORAGE_VECTOR_LENGTH);

            // Fast path: inline-allocate a fresh JSImmutableButterfly of the
            // known constant length; fall back to an operation call on failure.
            LValue fastImmutableButterflyValue = allocateVariableSizedCell<JSImmutableButterfly>(
                m_out.constIntPtr(JSImmutableButterfly::allocationSize(immutableButterfly->length())),
                m_graph.m_vm.immutableButterflyStructures[arrayIndexFromIndexingType(CopyOnWriteArrayWithContiguous) - NumberOfIndexingShapes].get(), slowAllocation);
            LValue fastImmutableButterflyStorage = toButterfly(fastImmutableButterflyValue);
            m_out.store32(m_out.constInt32(immutableButterfly->length()), fastImmutableButterflyStorage, m_heaps.Butterfly_publicLength);
            m_out.store32(m_out.constInt32(immutableButterfly->length()), fastImmutableButterflyStorage, m_heaps.Butterfly_vectorLength);
            ValueFromBlock fastImmutableButterfly = m_out.anchor(fastImmutableButterflyValue);
            m_out.jump(continuation);

            LBasicBlock lastNext = m_out.appendTo(slowAllocation, continuation);
            ValueFromBlock slowImmutableButterfly = m_out.anchor(vmCall(pointerType(), operationCreateImmutableButterfly, weakPointer(globalObject), m_out.constInt32(immutableButterfly->length())));
            m_out.jump(continuation);

            // Merge the two allocations and fill in the elements as constants.
            m_out.appendTo(continuation, lastNext);
            LValue immutableButterflyValue = m_out.phi(pointerType(), fastImmutableButterfly, slowImmutableButterfly);
            LValue immutableButterflyStorage = toButterfly(immutableButterflyValue);
            for (unsigned i = 0; i < immutableButterfly->length(); i++) {
                // Because forwarded values are drained as JSValue, we should not generate value
                // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
                int64_t value = JSValue::encode(immutableButterfly->get(i));
                m_out.store64(
                    m_out.constInt64(value),
                    m_out.baseIndex(m_heaps.indexedContiguousProperties, immutableButterflyStorage, m_out.constIntPtr(i), jsNumber(i)));
            }
            mutatorFence();
            setJSValue(immutableButterflyValue);
            return;
        }

        if (m_node->child1()->op() == PhantomCreateRest) {
            // This IR is rare to generate since it requires escaping the Spread
            // but not the CreateRest. In bytecode, we have only few operations that
            // accept Spread's result as input. This usually leads to the Spread node not
            // escaping. However, this can happen if for example we generate a PutStack on
            // the Spread but nothing escapes the CreateRest.

            ASSERT(m_graph.isWatchingHavingABadTimeWatchpoint(m_node->child1().node()));

            LBasicBlock fastAllocation = m_out.newBlock();
            LBasicBlock loopHeader = m_out.newBlock();
            LBasicBlock loopBody = m_out.newBlock();
            LBasicBlock slowAllocation = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // The rest parameter's contents come straight from the caller's
            // argument area, skipping the named-argument prefix.
            InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame();
            unsigned numberOfArgumentsToSkip = m_node->child1()->numberOfArgumentsToSkip();
            LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);
            LValue length = getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
            static_assert(sizeof(JSValue) == 8 && 1 << 3 == 8, "Assumed in the code below.");
            // size = length * sizeof(JSValue) + header, computed in pointer width.
            LValue size = m_out.add(
                m_out.shl(m_out.zeroExtPtr(length), m_out.constInt32(3)),
                m_out.constIntPtr(JSImmutableButterfly::offsetOfData()));
            // Oversized spreads take the slow allocation path.
            m_out.branch(m_out.above(length, m_out.constInt32(MAX_STORAGE_VECTOR_LENGTH)), rarely(slowAllocation), usually(fastAllocation));

            LBasicBlock lastNext = m_out.appendTo(fastAllocation, slowAllocation);
            LValue fastArrayValue = allocateVariableSizedCell<JSImmutableButterfly>(size, m_graph.m_vm.immutableButterflyStructures[arrayIndexFromIndexingType(CopyOnWriteArrayWithContiguous) - NumberOfIndexingShapes].get(), slowAllocation);
            LValue fastArrayStorage = toButterfly(fastArrayValue);
            m_out.store32(length, fastArrayStorage, m_heaps.Butterfly_vectorLength);
            m_out.store32(length, fastArrayStorage, m_heaps.Butterfly_publicLength);
            ValueFromBlock fastArray = m_out.anchor(fastArrayValue);
            m_out.jump(loopHeader);

            m_out.appendTo(slowAllocation, loopHeader);
            ValueFromBlock slowArray = m_out.anchor(vmCall(pointerType(), operationCreateImmutableButterfly, weakPointer(globalObject), length));
            m_out.jump(loopHeader);

            // Copy loop: move each argument slot into the butterfly. The loop is
            // skipped entirely when length is zero.
            m_out.appendTo(loopHeader, loopBody);
            LValue immutableButterfly = m_out.phi(pointerType(), fastArray, slowArray);
            LValue immutableButterflyStorage = toButterfly(immutableButterfly);
            ValueFromBlock startIndex = m_out.anchor(m_out.constIntPtr(0));
            m_out.branch(m_out.isZero32(length), unsure(continuation), unsure(loopBody));

            m_out.appendTo(loopBody, continuation);
            LValue index = m_out.phi(pointerType(), startIndex);
            LValue value = m_out.load64(
                m_out.baseIndex(m_heaps.variables, sourceStart, index));
            m_out.store64(value, m_out.baseIndex(m_heaps.indexedContiguousProperties, immutableButterflyStorage, index));
            LValue nextIndex = m_out.add(m_out.constIntPtr(1), index);
            m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
            m_out.branch(m_out.below(nextIndex, m_out.zeroExtPtr(length)), unsure(loopBody), unsure(continuation));

            m_out.appendTo(continuation, lastNext);
            mutatorFence();
            setJSValue(immutableButterfly);
            return;
        }

        // General case: child1 is a live cell.
        LValue argument = lowCell(m_node->child1());

        LValue result;

        if (m_node->child1().useKind() == ArrayUse)
            speculateArray(m_node->child1());

        if (m_graph.canDoFastSpread(m_node, m_state.forNode(m_node->child1()))) {
            LBasicBlock copyOnWriteContiguousCheck = m_out.newBlock();
            LBasicBlock copyOnWritePropagation = m_out.newBlock();
            LBasicBlock preLoop = m_out.newBlock();
            LBasicBlock loopSelection = m_out.newBlock();
            LBasicBlock contiguousLoopStart = m_out.newBlock();
            LBasicBlock doubleLoopStart = m_out.newBlock();
            LBasicBlock fastPath = m_out.newBlock();
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Only Int32Shape..ContiguousShape (inclusive) are handled inline;
            // the unsigned-subtract trick performs the range check in one compare.
            LValue indexingMode = m_out.load8ZeroExt32(argument, m_heaps.JSCell_indexingTypeAndMisc);
            LValue indexingShape = m_out.bitAnd(indexingMode, m_out.constInt32(IndexingShapeMask));
            LValue isOKIndexingType = m_out.belowOrEqual(
                m_out.sub(indexingShape, m_out.constInt32(Int32Shape)),
                m_out.constInt32(ContiguousShape - Int32Shape));

            m_out.branch(isOKIndexingType, unsure(copyOnWriteContiguousCheck), unsure(slowPath));
            LBasicBlock lastNext = m_out.appendTo(copyOnWriteContiguousCheck, copyOnWritePropagation);
            LValue butterfly = m_out.loadPtr(argument, m_heaps.JSObject_butterfly);
            m_out.branch(m_out.equal(m_out.bitAnd(indexingMode, m_out.constInt32(IndexingModeMask)), m_out.constInt32(CopyOnWriteArrayWithContiguous)), unsure(copyOnWritePropagation), unsure(preLoop));

            // Copy-on-write contiguous arrays already wrap a JSImmutableButterfly,
            // so we can share it: step back from the butterfly's data pointer to
            // the containing cell instead of copying.
            m_out.appendTo(copyOnWritePropagation, preLoop);
            ValueFromBlock sharedResult = m_out.anchor(m_out.add(butterfly, m_out.constIntPtr(-JSImmutableButterfly::offsetOfData())));
            m_out.jump(continuation);

            m_out.appendTo(preLoop, fastPath);
            LValue length = m_out.load32NonNegative(butterfly, m_heaps.Butterfly_publicLength);
            static_assert(sizeof(JSValue) == 8 && 1 << 3 == 8, "Assumed in the code below.");
            // size = length * sizeof(JSValue) + header.
            LValue size = m_out.add(
                m_out.shl(m_out.zeroExtPtr(length), m_out.constInt32(3)),
                m_out.constIntPtr(JSImmutableButterfly::offsetOfData()));
            m_out.branch(m_out.above(length, m_out.constInt32(MAX_STORAGE_VECTOR_LENGTH)), rarely(slowPath), usually(fastPath));

            m_out.appendTo(fastPath, loopSelection);
            LValue fastAllocation = allocateVariableSizedCell<JSImmutableButterfly>(size, m_graph.m_vm.immutableButterflyStructures[arrayIndexFromIndexingType(CopyOnWriteArrayWithContiguous) - NumberOfIndexingShapes].get(), slowPath);
            LValue fastStorage = toButterfly(fastAllocation);
            m_out.store32(length, fastStorage, m_heaps.Butterfly_vectorLength);
            m_out.store32(length, fastStorage, m_heaps.Butterfly_publicLength);
            ValueFromBlock fastResult = m_out.anchor(fastAllocation);

            ValueFromBlock startIndexForContiguous = m_out.anchor(m_out.constIntPtr(0));
            ValueFromBlock startIndexForDouble = m_out.anchor(m_out.constIntPtr(0));

            m_out.branch(m_out.isZero32(length), unsure(continuation), unsure(loopSelection));

            // Pick the copy loop that matches the source's indexing shape:
            // doubles need boxing, int32/contiguous can be copied as raw bits.
            m_out.appendTo(loopSelection, contiguousLoopStart);
            m_out.branch(m_out.equal(indexingShape, m_out.constInt32(DoubleShape)),
                unsure(doubleLoopStart), unsure(contiguousLoopStart));

            {
                m_out.appendTo(contiguousLoopStart, doubleLoopStart);
                LValue index = m_out.phi(pointerType(), startIndexForContiguous);

                TypedPointer loadSite = m_out.baseIndex(m_heaps.root, butterfly, index, ScaleEight); // We read TOP here since we can be reading either int32 or contiguous properties.
                LValue value = m_out.load64(loadSite);
                // A zero (empty) slot is a hole; spread materializes it as undefined.
                value = m_out.select(m_out.isZero64(value), m_out.constInt64(JSValue::encode(jsUndefined())), value);
                m_out.store64(value, m_out.baseIndex(m_heaps.indexedContiguousProperties, fastStorage, index));

                LValue nextIndex = m_out.add(index, m_out.constIntPtr(1));
                m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));

                m_out.branch(m_out.below(nextIndex, m_out.zeroExtPtr(length)),
                    unsure(contiguousLoopStart), unsure(continuation));
            }

            {
                m_out.appendTo(doubleLoopStart, slowPath);
                LValue index = m_out.phi(pointerType(), startIndexForDouble);

                // NaN (unordered self-compare) marks a hole in double arrays;
                // holes become undefined, real doubles get boxed.
                LValue value = m_out.loadDouble(m_out.baseIndex(m_heaps.indexedDoubleProperties, butterfly, index));
                LValue isNaN = m_out.doubleNotEqualOrUnordered(value, value);
                LValue holeResult = m_out.constInt64(JSValue::encode(jsUndefined()));
                LValue normalResult = boxDouble(value);
                value = m_out.select(isNaN, holeResult, normalResult);
                m_out.store64(value, m_out.baseIndex(m_heaps.indexedContiguousProperties, fastStorage, index));

                LValue nextIndex = m_out.add(index, m_out.constIntPtr(1));
                m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));

                m_out.branch(m_out.below(nextIndex, m_out.zeroExtPtr(length)),
                    unsure(doubleLoopStart), unsure(continuation));
            }

            m_out.appendTo(slowPath, continuation);
            ValueFromBlock slowResult = m_out.anchor(vmCall(pointerType(), operationSpreadFastArray, weakPointer(globalObject), argument));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            result = m_out.phi(pointerType(), sharedResult, fastResult, slowResult);
            mutatorFence();
        } else
            result = vmCall(pointerType(), operationSpreadGeneric, weakPointer(globalObject), argument);

        setJSValue(result);
    }
7900
    // Lowers NewArrayBuffer: materializes a JSArray backed by a constant
    // JSImmutableButterfly (copy-on-write). Falls back to an operation call when
    // the global object is "having a bad time" or array storage is required.
    void compileNewArrayBuffer()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
            m_node->indexingMode()));
        auto* immutableButterfly = m_node->castOperand<JSImmutableButterfly*>();

        if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingMode())) {
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Fast path: inline-allocate the JSArray cell, pointing its butterfly
            // at the shared constant storage (no copy).
            LValue fastArray = allocateObject<JSArray>(structure, m_out.constIntPtr(immutableButterfly->toButterfly()), slowPath);
            ValueFromBlock fastResult = m_out.anchor(fastArray);
            m_out.jump(continuation);

            m_out.appendTo(slowPath, continuation);
            LValue slowArray = vmCall(Int64, operationNewArrayBuffer, m_vmValue, weakStructure(structure), frozenPointer(m_node->cellOperand()));
            ValueFromBlock slowResult = m_out.anchor(slowArray);
            m_out.jump(continuation);

            m_out.appendTo(continuation);

            mutatorFence();
            setJSValue(m_out.phi(pointerType(), slowResult, fastResult));
            return;
        }

        setJSValue(vmCall(
            Int64, operationNewArrayBuffer, m_vmValue,
            weakStructure(structure), frozenPointer(m_node->cellOperand())));
    }
7932
    // Lowers NewArrayWithSize: allocates a JSArray of a dynamic length.
    // Inline-allocates when possible; otherwise calls out, selecting the
    // ArrayStorage structure for lengths at or above the construction threshold.
    void compileNewArrayWithSize()
    {
        LValue publicLength = lowInt32(m_node->child1());

        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
            m_node->indexingType()));

        if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
            IndexingType indexingType = m_node->indexingType();
            setJSValue(
                allocateJSArray(
                    publicLength, publicLength, weakPointer(globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType)), m_out.constInt32(indexingType)).array);
            mutatorFence();
            return;
        }

        // Slow path: choose the structure at runtime — large lengths get
        // ArrayWithArrayStorage, otherwise the requested indexing type.
        LValue structureValue = m_out.select(
            m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)),
            weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage))),
            weakStructure(structure));
        setJSValue(vmCall(Int64, operationNewArrayWithSize, weakPointer(globalObject), structureValue, publicLength, m_out.intPtrZero));
    }
7956
    // Lowers NewTypedArray. For an Int32 length, inline-allocates both the
    // backing storage and the view cell (falling back to a lazy slow path);
    // for an untyped argument, defers entirely to the runtime operation.
    void compileNewTypedArray()
    {
        TypedArrayType typedArrayType = m_node->typedArrayType();
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

        switch (m_node->child1().useKind()) {
        case Int32Use: {
            RegisteredStructure structure = m_graph.registerStructure(globalObject->typedArrayStructureConcurrently(typedArrayType));

            LValue size = lowInt32(m_node->child1());

            LBasicBlock smallEnoughCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // If we reach slowCase before allocating storage, the phi below
            // sees null storage and the operation allocates it instead.
            ValueFromBlock noStorage = m_out.anchor(m_out.intPtrZero);

            m_out.branch(
                m_out.above(size, m_out.constInt32(JSArrayBufferView::fastSizeLimit)),
                rarely(slowCase), usually(smallEnoughCase));

            LBasicBlock lastNext = m_out.appendTo(smallEnoughCase, slowCase);

            // byteSize = size << log2(elementSize), rounded up to a multiple of 8
            // for sub-8-byte element types.
            LValue byteSize =
                m_out.shl(m_out.zeroExtPtr(size), m_out.constInt32(logElementSize(typedArrayType)));
            if (elementSize(typedArrayType) < 8) {
                byteSize = m_out.bitAnd(
                    m_out.add(byteSize, m_out.constIntPtr(7)),
                    m_out.constIntPtr(~static_cast<intptr_t>(7)));
            }

            LValue allocator = allocatorForSize(vm().primitiveGigacageAuxiliarySpace, byteSize, slowCase);
            LValue storage = allocateHeapCell(allocator, slowCase);

            // Zero-fill the storage, 8 bytes at a time.
            splatWords(
                storage,
                m_out.int32Zero,
                m_out.castToInt32(m_out.lShr(byteSize, m_out.constIntPtr(3))),
                m_out.int64Zero,
                m_heaps.typedArrayProperties);

#if CPU(ARM64E)
            {
                // On ARM64E the vector pointer is signed with the array length
                // (pointer authentication); emit a patchpoint to tag it.
                LValue sizePtr = m_out.zeroExtPtr(size);
                PatchpointValue* authenticate = m_out.patchpoint(pointerType());
                authenticate->appendSomeRegister(storage);
                authenticate->append(sizePtr, B3::ValueRep(B3::ValueRep::SomeLateRegister));
                authenticate->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                    jit.move(params[1].gpr(), params[0].gpr());
                    jit.tagArrayPtr(params[2].gpr(), params[0].gpr());
                });
                storage = authenticate;
            }
#endif

            ValueFromBlock haveStorage = m_out.anchor(storage);

            // Allocate the view cell itself; the concrete class depends on the
            // typed array type.
            LValue fastResultValue = nullptr;
            switch (typedArrayType) {
#define TYPED_ARRAY_TYPE_CASE(name) \
            case Type ## name: \
                fastResultValue = allocateObject<JS##name##Array>(structure, m_out.intPtrZero, slowCase); \
                break;
            FOR_EACH_TYPED_ARRAY_TYPE_EXCLUDING_DATA_VIEW(TYPED_ARRAY_TYPE_CASE)
#undef TYPED_ARRAY_TYPE_CASE
            case TypeDataView:
                fastResultValue = allocateObject<JSDataView>(structure, m_out.intPtrZero, slowCase);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            m_out.storePtr(storage, fastResultValue, m_heaps.JSArrayBufferView_vector);
            m_out.store32(size, fastResultValue, m_heaps.JSArrayBufferView_length);
            m_out.store32(m_out.constInt32(FastTypedArray), fastResultValue, m_heaps.JSArrayBufferView_mode);

            mutatorFence();
            ValueFromBlock fastResult = m_out.anchor(fastResultValue);
            m_out.jump(continuation);

            m_out.appendTo(slowCase, continuation);
            LValue storageValue = m_out.phi(pointerType(), noStorage, haveStorage);

            // Lazy slow path: code is generated only if this path is ever taken.
            VM& vm = this->vm();
            LValue slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationNewTypedArrayWithSizeForType(typedArrayType), locations[0].directGPR(), globalObject,
                        CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                        locations[2].directGPR());
                },
                size, storageValue);
            ValueFromBlock slowResult = m_out.anchor(slowResultValue);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
            return;
        }

        case UntypedUse: {
            LValue argument = lowJSValue(m_node->child1());

            LValue result = vmCall(
                pointerType(), operationNewTypedArrayWithOneArgumentForType(typedArrayType),
                weakPointer(globalObject), weakPointer(globalObject->typedArrayStructureConcurrently(typedArrayType)), argument);

            setJSValue(result);
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return;
        }
    }
8074
8075 void compileAllocatePropertyStorage()
8076 {
8077 LValue object = lowCell(m_node->child1());
8078 setStorage(allocatePropertyStorage(object, m_node->transition()->previous.get()));
8079 }
8080
8081 void compileReallocatePropertyStorage()
8082 {
8083 Transition* transition = m_node->transition();
8084 LValue object = lowCell(m_node->child1());
8085 LValue oldStorage = lowStorage(m_node->child2());
8086
8087 setStorage(
8088 reallocatePropertyStorage(
8089 object, oldStorage, transition->previous.get(), transition->next.get()));
8090 }
8091
8092 void compileNukeStructureAndSetButterfly()
8093 {
8094 nukeStructureAndSetButterfly(lowStorage(m_node->child2()), lowCell(m_node->child1()));
8095 }
8096
    // Lowers ToNumber. If the input is proven non-numeric, calls the operation
    // directly; otherwise emits a number check with an inline fast path that
    // passes numbers through unchanged.
    void compileToNumber()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        LValue value = lowJSValue(m_node->child1());

        if (!(abstractValue(m_node->child1()).m_type & SpecBytecodeNumber))
            setJSValue(vmCall(Int64, operationToNumber, weakPointer(globalObject), value));
        else {
            LBasicBlock notNumber = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Fast path: numbers convert to themselves.
            ValueFromBlock fastResult = m_out.anchor(value);
            m_out.branch(isNumber(value, provenType(m_node->child1())), unsure(continuation), unsure(notNumber));

            // notNumber case.
            LBasicBlock lastNext = m_out.appendTo(notNumber, continuation);
            // We have several attempts to remove ToNumber. But ToNumber still exists.
            // It means that converting non-numbers to numbers by this ToNumber is not rare.
            // Instead of the lazy slow path generator, we call the operation here.
            ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, operationToNumber, weakPointer(globalObject), value));
            m_out.jump(continuation);

            // continuation case.
            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
        }
    }
8124
    // Lowers ToNumeric. Numbers and BigInts (both BigInt32 and heap BigInt)
    // pass through unchanged on the fast path; everything else calls the
    // operation. Falls back entirely to the operation when the input is proven
    // to be neither.
    void compileToNumeric()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        LValue value = lowJSValue(m_node->child1());

        if (abstractValue(m_node->child1()).m_type & (SpecBytecodeNumber | SpecBigInt)) {
            LBasicBlock notNumber = m_out.newBlock();
#if USE(BIGINT32)
            LBasicBlock notBigInt32 = m_out.newBlock();
#endif
            LBasicBlock isCellPath = m_out.newBlock();
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Fast path: numbers (and, below, BigInts) are already numeric.
            ValueFromBlock fastResult = m_out.anchor(value);
            m_out.branch(isNumber(value, provenType(m_node->child1())), unsure(continuation), unsure(notNumber));

            // notNumber case.
            LBasicBlock lastNext = m_out.appendTo(notNumber, continuation);
#if USE(BIGINT32)
            m_out.branch(isBigInt32(value, provenType(m_node->child1())), unsure(continuation), unsure(notBigInt32));
            m_out.appendTo(notBigInt32);
#endif
            m_out.branch(isCell(value, provenType(m_node->child1())), unsure(isCellPath), unsure(slowPath));

            // Cells are fast-pathed only if they are heap BigInts.
            m_out.appendTo(isCellPath);
            m_out.branch(isHeapBigInt(value, provenType(m_node->child1())), unsure(continuation), unsure(slowPath));

            m_out.appendTo(slowPath);
            // We have several attempts to remove ToNumeric. But ToNumeric still exists.
            // It means that the slow path is not rare.
            // Instead of the lazy slow path generator, we call the operation here.
            ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, operationToNumeric, weakPointer(globalObject), value));
            m_out.jump(continuation);

            // continuation case.
            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
        } else
            setJSValue(vmCall(Int64, operationToNumeric, weakPointer(globalObject), value));
    }
8166
    // Lowers CallNumberConstructor (Number(x) called as a function). BigInt32
    // inputs are unboxed directly to int32; numbers pass through; everything
    // else goes through the operation.
    void compileCallNumberConstructor()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
#if USE(BIGINT32)
        if (m_node->child1().useKind() == BigInt32Use) {
            LValue value = lowBigInt32(m_node->child1());
            setInt32(unboxBigInt32(value));
            return;
        }
#endif
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock notNumber = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Fast path: numbers are returned as-is.
        ValueFromBlock fastResult = m_out.anchor(value);
        m_out.branch(isNumber(value, provenType(m_node->child1())), unsure(continuation), unsure(notNumber));

        // notNumber case.
        LBasicBlock lastNext = m_out.appendTo(notNumber, continuation);
        ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, operationCallNumberConstructor, weakPointer(globalObject), value));
        m_out.jump(continuation);

        // continuation case.
        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
    }
8194
8195
    // Shared lowering for ToString, CallStringConstructor, and StringValueOf.
    // The strategy is dispatched on child1's use kind; which of the three ops
    // is being lowered only affects which runtime operation the slow paths call.
    void compileToStringOrCallStringConstructorOrStringValueOf()
    {
        ASSERT(m_node->op() != StringValueOf || m_node->child1().useKind() == UntypedUse);
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        switch (m_node->child1().useKind()) {
        case StringObjectUse: {
            // FIXME: We should convert this to GetInternalField(0).
            // https://bugs.webkit.org/show_bug.cgi?id=209453
            // A StringObject's result is just its wrapped internal string.
            LValue cell = lowCell(m_node->child1());
            speculateStringObjectForCell(m_node->child1(), cell);
            setJSValue(m_out.loadPtr(cell, m_heaps.JSWrapperObject_internalValue));
            return;
        }

        case StringOrStringObjectUse: {
            LValue cell = lowCell(m_node->child1());
            LValue type = m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType);

            LBasicBlock notString = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Strings convert to themselves.
            ValueFromBlock simpleResult = m_out.anchor(cell);
            m_out.branch(
                m_out.equal(type, m_out.constInt32(StringType)),
                unsure(continuation), unsure(notString));

            // Otherwise speculate StringObject and unwrap its internal string.
            LBasicBlock lastNext = m_out.appendTo(notString, continuation);
            speculate(
                BadType, jsValueValue(cell), m_node->child1().node(),
                m_out.notEqual(type, m_out.constInt32(StringObjectType)));
            ValueFromBlock unboxedResult = m_out.anchor(
                m_out.loadPtr(cell, m_heaps.JSWrapperObject_internalValue));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, simpleResult, unboxedResult));

            m_interpreter.filter(m_node->child1(), SpecString | SpecStringObject);
            return;
        }

        case CellUse:
        case NotCellUse:
        case UntypedUse: {
            LValue value;
            if (m_node->child1().useKind() == CellUse)
                value = lowCell(m_node->child1());
            else if (m_node->child1().useKind() == NotCellUse)
                value = lowNotCell(m_node->child1());
            else
                value = lowJSValue(m_node->child1());

            LBasicBlock isCell = m_out.newBlock();
            LBasicBlock notString = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // The cell-ness check can be resolved statically for CellUse and
            // NotCellUse; only UntypedUse needs a dynamic test.
            LValue isCellPredicate;
            if (m_node->child1().useKind() == CellUse)
                isCellPredicate = m_out.booleanTrue;
            else if (m_node->child1().useKind() == NotCellUse)
                isCellPredicate = m_out.booleanFalse;
            else
                isCellPredicate = this->isCell(value, provenType(m_node->child1()));
            m_out.branch(isCellPredicate, unsure(isCell), unsure(notString));

            // Cells that are already strings convert to themselves; only test
            // for string when prediction says strings are plausible.
            LBasicBlock lastNext = m_out.appendTo(isCell, notString);
            ValueFromBlock simpleResult = m_out.anchor(value);
            LValue isStringPredicate;
            if (m_node->child1()->prediction() & SpecString) {
                isStringPredicate = isString(value, provenType(m_node->child1()));
            } else
                isStringPredicate = m_out.booleanFalse;
            m_out.branch(isStringPredicate, unsure(continuation), unsure(notString));

            // Slow path: pick the operation matching the node's op and whether
            // the input is known to be a cell.
            m_out.appendTo(notString, continuation);
            LValue result;
            if (m_node->child1().useKind() == CellUse) {
                ASSERT(m_node->op() != StringValueOf);
                result = vmCall(Int64, m_node->op() == ToString ? operationToStringOnCell : operationCallStringConstructorOnCell, weakPointer(globalObject), value);
            } else {
                auto* operation = m_node->op() == ToString
                    ? operationToString : m_node->op() == StringValueOf
                    ? operationStringValueOf : operationCallStringConstructor;
                result = vmCall(Int64, operation, weakPointer(globalObject), value);
            }
            ValueFromBlock convertedResult = m_out.anchor(result);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, simpleResult, convertedResult));
            return;
        }

        // Numeric use kinds go straight to the radix-10 stringify operations.
        case Int32Use:
            setJSValue(vmCall(Int64, operationInt32ToStringWithValidRadix, weakPointer(globalObject), lowInt32(m_node->child1()), m_out.constInt32(10)));
            return;

        case Int52RepUse:
            setJSValue(vmCall(Int64, operationInt52ToStringWithValidRadix, weakPointer(globalObject), lowStrictInt52(m_node->child1()), m_out.constInt32(10)));
            return;

        case DoubleRepUse:
            setJSValue(vmCall(Int64, operationDoubleToStringWithValidRadix, weakPointer(globalObject), lowDouble(m_node->child1()), m_out.constInt32(10)));
            return;

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
8306
    // Lowers FunctionToString: tries to load a cached source string from the
    // function's executable (NativeExecutable's asString, or a
    // FunctionExecutable's rare data), calling the operation for bound
    // functions or when no cached string exists yet.
    void compileFunctionToString()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

        LBasicBlock notBoundFunctionCase = m_out.newBlock();
        LBasicBlock functionExecutableCase = m_out.newBlock();
        LBasicBlock nativeExecutableCase = m_out.newBlock();
        LBasicBlock testPtr = m_out.newBlock();
        LBasicBlock hasRareData = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue function = lowCell(m_node->child1());
        speculateFunction(m_node->child1(), function);

        // Bound functions always take the slow path; the classInfo equality
        // check is sufficient because JSBoundFunction has no subclasses.
        LValue structure = loadStructure(function);
        LValue classInfo = m_out.loadPtr(structure, m_heaps.Structure_classInfo);
        static_assert(std::is_final_v<JSBoundFunction>, "We don't handle subclasses when comparing classInfo below");
        m_out.branch(m_out.equal(classInfo, m_out.constIntPtr(JSBoundFunction::info())), unsure(slowCase), unsure(notBoundFunctionCase));

        LBasicBlock lastNext = m_out.appendTo(notBoundFunctionCase, nativeExecutableCase);
        LValue executable = getExecutable(function);
        m_out.branch(isType(executable, NativeExecutableType), unsure(nativeExecutableCase), unsure(functionExecutableCase));

        // Native functions cache the string directly on the executable.
        m_out.appendTo(nativeExecutableCase, functionExecutableCase);
        ValueFromBlock nativeResult = m_out.anchor(m_out.loadPtr(executable, m_heaps.NativeExecutable_asString));
        m_out.jump(testPtr);

        // JS functions cache it on the executable's rare data, if present.
        m_out.appendTo(functionExecutableCase, testPtr);
        LValue rareData = m_out.loadPtr(executable, m_heaps.FunctionExecutable_rareData);
        m_out.branch(m_out.notNull(rareData), usually(hasRareData), rarely(slowCase));

        m_out.appendTo(hasRareData, slowCase);
        ValueFromBlock functionResult = m_out.anchor(m_out.loadPtr(rareData, m_heaps.FunctionExecutableRareData_asString));
        m_out.jump(testPtr);

        // A null cached string means it hasn't been computed yet: go slow.
        m_out.appendTo(testPtr, continuation);
        LValue asString = m_out.phi(pointerType(), nativeResult, functionResult);
        ValueFromBlock fastResult = m_out.anchor(asString);
        m_out.branch(m_out.notNull(asString), usually(continuation), rarely(slowCase));

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(vmCall(pointerType(), operationFunctionToString, weakPointer(globalObject), function));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
8355
    // Lowers ToPrimitive: non-cells and non-object cells (strings, symbols,
    // BigInts) pass through unchanged; only objects call the operation.
    void compileToPrimitive()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock isObjectCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        Vector<ValueFromBlock, 3> results;

        // Non-cells are already primitive.
        results.append(m_out.anchor(value));
        m_out.branch(
            isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));

        // Non-object cells are already primitive too.
        LBasicBlock lastNext = m_out.appendTo(isCellCase, isObjectCase);
        results.append(m_out.anchor(value));
        m_out.branch(
            isObject(value, provenType(m_node->child1())),
            unsure(isObjectCase), unsure(continuation));

        m_out.appendTo(isObjectCase, continuation);
        results.append(m_out.anchor(vmCall(
            Int64, operationToPrimitive, weakPointer(globalObject), value)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, results));
    }
8385
    // Lowers ToPropertyKey: strings and symbols pass through unchanged;
    // everything else (including non-cells) calls the operation.
    void compileToPropertyKey()
    {
        ASSERT(m_node->child1().useKind() == UntypedUse);
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock notStringCase = m_out.newBlock();
        LBasicBlock slowPathCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        Vector<ValueFromBlock, 3> results;
        // Non-cells can never be property keys already; go slow.
        m_out.branch(
            isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(slowPathCase));

        // Strings are valid property keys as-is.
        LBasicBlock lastNext = m_out.appendTo(isCellCase, notStringCase);
        results.append(m_out.anchor(value));
        m_out.branch(isString(value, provenType(m_node->child1())), unsure(continuation), unsure(notStringCase));

        // So are symbols.
        m_out.appendTo(notStringCase, slowPathCase);
        results.append(m_out.anchor(value));
        m_out.branch(isSymbol(value, provenType(m_node->child1())), unsure(continuation), unsure(slowPathCase));

        m_out.appendTo(slowPathCase, continuation);
        results.append(m_out.anchor(vmCall(
            Int64, operationToPropertyKey, weakPointer(globalObject), value)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, results));
    }
8417
    // Lowers MakeRope: builds a JSRopeString over two or three fiber strings
    // without copying their characters.
    //
    // Fast path: inline-allocate the rope cell, combine the fibers' flags and
    // lengths, and pack the fiber pointers into the rope's three pointer-wide
    // fields. A zero combined length yields the shared empty string instead;
    // allocation failure falls back to operationMakeRope2/3.
    void compileMakeRope()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

        // Per-fiber pair of the StringImpl flags word and the length, as Int32s.
        struct FlagsAndLength {
            LValue flags;
            LValue length;
        };

        Edge edges[3] = {
            m_node->child1(),
            m_node->child2(),
            m_node->child3(),
        };
        LValue kids[3];
        unsigned numKids;
        kids[0] = lowCell(edges[0]);
        kids[1] = lowCell(edges[1]);
        if (edges[2]) {
            kids[2] = lowCell(edges[2]);
            numKids = 3;
        } else {
            // No third child: this is a two-fiber rope.
            kids[2] = nullptr;
            numKids = 2;
        }

        LBasicBlock emptyCase = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        Allocator allocator = allocatorForNonVirtualConcurrently<JSRopeString>(vm(), sizeof(JSRopeString), AllocatorForMode::AllocatorIfExists);

        // Inline allocation; branches to slowPath when the allocator is
        // exhausted (or does not exist).
        LValue result = allocateCell(
            m_out.constIntPtr(allocator.localAllocator()), vm().stringStructure.get(), slowPath);

        // This puts nullptr for the first fiber. It makes visitChildren safe even if this JSRopeString is discarded due to the speculation failure in the following path.
        m_out.storePtr(m_out.constIntPtr(JSString::isRopeInPointer), result, m_heaps.JSRopeString_fiber0);

        // Produces the flags/length pair for one fiber: constant-folded for a
        // known JSString constant; otherwise loaded either from the
        // JSRopeString header (rope fiber) or from the StringImpl (resolved
        // fiber).
        auto getFlagsAndLength = [&] (Edge& edge, LValue child) {
            if (JSString* string = edge->dynamicCastConstant<JSString*>(vm())) {
                return FlagsAndLength {
                    m_out.constInt32(string->is8Bit() ? StringImpl::flagIs8Bit() : 0),
                    m_out.constInt32(string->length())
                };
            }

            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock ropeCase = m_out.newBlock();
            LBasicBlock notRopeCase = m_out.newBlock();

            m_out.branch(isRopeString(child, edge), unsure(ropeCase), unsure(notRopeCase));

            LBasicBlock lastNext = m_out.appendTo(ropeCase, notRopeCase);
            ValueFromBlock flagsForRope = m_out.anchor(m_out.load32NonNegative(child, m_heaps.JSRopeString_flags));
            ValueFromBlock lengthForRope = m_out.anchor(m_out.load32NonNegative(child, m_heaps.JSRopeString_length));
            m_out.jump(continuation);

            m_out.appendTo(notRopeCase, continuation);
            LValue stringImpl = m_out.loadPtr(child, m_heaps.JSString_value);
            ValueFromBlock flagsForNonRope = m_out.anchor(m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_hashAndFlags));
            ValueFromBlock lengthForNonRope = m_out.anchor(m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return FlagsAndLength {
                m_out.phi(Int32, flagsForRope, flagsForNonRope),
                m_out.phi(Int32, lengthForRope, lengthForNonRope)
            };
        };

        FlagsAndLength flagsAndLength = getFlagsAndLength(edges[0], kids[0]);
        for (unsigned i = 1; i < numKids; ++i) {
            auto mergeFlagsAndLength = [&] (Edge& edge, LValue child, FlagsAndLength previousFlagsAndLength) {
                FlagsAndLength flagsAndLength = getFlagsAndLength(edge, child);
                // AND-ing the flags keeps is8Bit set only if every fiber is 8-bit.
                LValue flags = m_out.bitAnd(previousFlagsAndLength.flags, flagsAndLength.flags);
                // OSR-exit if the combined length overflows int32.
                CheckValue* lengthCheck = m_out.speculateAdd(previousFlagsAndLength.length, flagsAndLength.length);
                blessSpeculation(lengthCheck, Uncountable, noValue(), nullptr, m_origin);
                return FlagsAndLength {
                    flags,
                    lengthCheck
                };
            };
            flagsAndLength = mergeFlagsAndLength(edges[i], kids[i], flagsAndLength);
        }

        // Pack the fibers into the rope's three pointer-wide fields: fiber0
        // carries the first fiber pointer plus the rope and is8Bit tag bits;
        // fiber1/fiber2 interleave the 32-bit length with the bits of the
        // remaining fiber pointers (mirrors JSRopeString's compact layout).
        m_out.storePtr(
            m_out.bitOr(
                m_out.bitOr(kids[0], m_out.constIntPtr(JSString::isRopeInPointer)),
                m_out.bitAnd(m_out.constIntPtr(JSRopeString::is8BitInPointer), m_out.zeroExtPtr(flagsAndLength.flags))),
            result, m_heaps.JSRopeString_fiber0);
        m_out.storePtr(
            m_out.bitOr(m_out.zeroExtPtr(flagsAndLength.length), m_out.shl(kids[1], m_out.constInt32(32))),
            result, m_heaps.JSRopeString_fiber1);
        if (numKids == 2)
            m_out.storePtr(m_out.lShr(kids[1], m_out.constInt32(32)), result, m_heaps.JSRopeString_fiber2);
        else
            m_out.storePtr(m_out.bitOr(m_out.lShr(kids[1], m_out.constInt32(32)), m_out.shl(kids[2], m_out.constInt32(16))), result, m_heaps.JSRopeString_fiber2);

        // Fence before the object can be observed by the concurrent collector.
        mutatorFence();
        ValueFromBlock fastResult = m_out.anchor(result);
        // An empty result must be the shared empty string, not a rope cell.
        m_out.branch(m_out.isZero32(flagsAndLength.length), rarely(emptyCase), usually(continuation));

        LBasicBlock lastNext = m_out.appendTo(emptyCase, slowPath);
        ValueFromBlock emptyResult = m_out.anchor(weakPointer(jsEmptyString(m_graph.m_vm)));
        m_out.jump(continuation);

        // Out-of-line allocation path: call the matching runtime operation.
        m_out.appendTo(slowPath, continuation);
        LValue slowResultValue;
        VM& vm = this->vm();
        switch (numKids) {
        case 2:
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationMakeRope2, locations[0].directGPR(), globalObject, locations[1].directGPR(),
                        locations[2].directGPR());
                }, kids[0], kids[1]);
            break;
        case 3:
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationMakeRope3, locations[0].directGPR(), globalObject, locations[1].directGPR(),
                        locations[2].directGPR(), locations[3].directGPR());
                }, kids[0], kids[1], kids[2]);
            break;
        default:
            DFG_CRASH(m_graph, m_node, "Bad number of children");
            break;
        }
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, emptyResult, slowResult));
    }
8554
    // Lowers StringCharAt: produces the single-character JSString at the given
    // index. In-bounds 8/16-bit loads use the VM's single-character string
    // cache; characters above maxSingleCharacterString call into the runtime.
    // Out-of-bounds behavior depends on the node's array mode.
    void compileStringCharAt()
    {
        LValue base = lowString(m_graph.child(m_node, 0));
        LValue index = lowInt32(m_graph.child(m_node, 1));
        LValue storage = lowStorage(m_graph.child(m_node, 2));

        LBasicBlock fastPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Unsigned compare: a negative index also goes to the slow path.
        LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);
        m_out.branch(
            m_out.aboveOrEqual(
                index, m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length)),
            rarely(slowPath), usually(fastPath));

        LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);

        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock is16Bit = m_out.newBlock();
        LBasicBlock bitsContinuation = m_out.newBlock();
        LBasicBlock bigCharacter = m_out.newBlock();

        // Pick the character width from the StringImpl's is8Bit flag.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(is16Bit), unsure(is8Bit));

        m_out.appendTo(is8Bit, is16Bit);

        // FIXME: Need to cage strings!
        // https://bugs.webkit.org/show_bug.cgi?id=174924
        ValueFromBlock char8Bit = m_out.anchor(
            m_out.load8ZeroExt32(m_out.baseIndex(
                m_heaps.characters8, storage, m_out.zeroExtPtr(index),
                provenValue(m_graph.child(m_node, 1)))));
        m_out.jump(bitsContinuation);

        m_out.appendTo(is16Bit, bigCharacter);

        LValue char16BitValue = m_out.load16ZeroExt32(
            m_out.baseIndex(
                m_heaps.characters16, storage, m_out.zeroExtPtr(index),
                provenValue(m_graph.child(m_node, 1))));
        ValueFromBlock char16Bit = m_out.anchor(char16BitValue);
        // Characters above the cache limit cannot use the small-strings table.
        m_out.branch(
            m_out.above(char16BitValue, m_out.constInt32(maxSingleCharacterString)),
            rarely(bigCharacter), usually(bitsContinuation));

        m_out.appendTo(bigCharacter, bitsContinuation);

        Vector<ValueFromBlock, 4> results;
        results.append(m_out.anchor(vmCall(
            Int64, operationSingleCharacterString,
            m_vmValue, char16BitValue)));
        m_out.jump(continuation);

        m_out.appendTo(bitsContinuation, slowPath);

        LValue character = m_out.phi(Int32, char8Bit, char16Bit);

        // Cached single-character strings are indexed directly by code unit.
        LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());

        results.append(m_out.anchor(m_out.loadPtr(m_out.baseIndex(
            m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(character)))));
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        if (m_node->arrayMode().isInBounds()) {
            // In-bounds mode: going out of bounds is a speculation failure.
            // The unconditional exit means the anchored zero below only
            // exists to satisfy the phi; it is never observed.
            speculate(OutOfBounds, noValue(), nullptr, m_out.booleanTrue);
            results.append(m_out.anchor(m_out.intPtrZero));
        } else {
            // FIXME: Revisit JSGlobalObject.
            // https://bugs.webkit.org/show_bug.cgi?id=203204
            JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
            Structure* stringPrototypeStructure = globalObject->stringPrototype()->structure(vm());
            Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(vm());
            WTF::dependentLoadLoadFence();

            if (globalObject->stringPrototypeChainIsSane()) {
                // FIXME: This could be captured using a Speculation mode that means
                // "out-of-bounds loads return a trivial value", something like
                // OutOfBoundsSaneChain.
                // https://bugs.webkit.org/show_bug.cgi?id=144668

                // Watch the prototype structures so this fast path is
                // invalidated if the chain stops being sane.
                m_graph.registerAndWatchStructureTransition(stringPrototypeStructure);
                m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);

                LBasicBlock negativeIndex = m_out.newBlock();

                // Positive out-of-bounds reads yield undefined; only a
                // negative index still needs the generic runtime call.
                results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
                m_out.branch(
                    m_out.lessThan(index, m_out.int32Zero),
                    rarely(negativeIndex), usually(continuation));

                m_out.appendTo(negativeIndex, continuation);
            }

            results.append(m_out.anchor(vmCall(Int64, operationGetByValStringInt, weakPointer(globalObject), base, index)));
        }

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        // We have to keep base alive since that keeps storage alive.
        ensureStillAliveHere(base);
        setJSValue(m_out.phi(Int64, results));
    }
8665
    // Lowers StringCharCodeAt: returns the UTF-16 code unit at the given
    // index as an Int32. The index is speculated in-bounds (OSR exit on
    // failure), then the load width is chosen by the StringImpl's is8Bit flag.
    void compileStringCharCodeAt()
    {
        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock is16Bit = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue base = lowString(m_node->child1());
        LValue index = lowInt32(m_node->child2());
        LValue storage = lowStorage(m_node->child3());

        LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);

        // Unsigned compare: also exits on a negative index.
        speculate(
            Uncountable, noValue(), nullptr,
            m_out.aboveOrEqual(
                index, m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length)));

        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(is16Bit), unsure(is8Bit));

        LBasicBlock lastNext = m_out.appendTo(is8Bit, is16Bit);

        // FIXME: need to cage strings!
        // https://bugs.webkit.org/show_bug.cgi?id=174924
        ValueFromBlock char8Bit = m_out.anchor(
            m_out.load8ZeroExt32(m_out.baseIndex(
                m_heaps.characters8, storage, m_out.zeroExtPtr(index),
                provenValue(m_node->child2()))));
        m_out.jump(continuation);

        m_out.appendTo(is16Bit, continuation);

        ValueFromBlock char16Bit = m_out.anchor(
            m_out.load16ZeroExt32(m_out.baseIndex(
                m_heaps.characters16, storage, m_out.zeroExtPtr(index),
                provenValue(m_node->child2()))));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        // We have to keep base alive since that keeps storage alive.
        ensureStillAliveHere(base);
        setInt32(m_out.phi(Int32, char8Bit, char16Bit));
    }
8713
    // Lowers StringCodePointAt: like charCodeAt, but when the in-bounds
    // 16-bit unit is a lead surrogate immediately followed by a trail
    // surrogate, the pair is combined into a single code point.
    void compileStringCodePointAt()
    {
        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock is16Bit = m_out.newBlock();
        LBasicBlock isLeadSurrogate = m_out.newBlock();
        LBasicBlock mayHaveTrailSurrogate = m_out.newBlock();
        LBasicBlock hasTrailSurrogate = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue base = lowString(m_node->child1());
        LValue index = lowInt32(m_node->child2());
        LValue storage = lowStorage(m_node->child3());

        LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);
        LValue length = m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length);

        // Unsigned compare: also exits on a negative index.
        speculate(Uncountable, noValue(), nullptr, m_out.aboveOrEqual(index, length));

        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(is16Bit), unsure(is8Bit));

        LBasicBlock lastNext = m_out.appendTo(is8Bit, is16Bit);
        // 8-bit strings cannot contain surrogates, so the unit is the answer.
        // FIXME: Need to cage strings!
        // https://bugs.webkit.org/show_bug.cgi?id=174924
        ValueFromBlock char8Bit = m_out.anchor(
            m_out.load8ZeroExt32(m_out.baseIndex(
                m_heaps.characters8, storage, m_out.zeroExtPtr(index),
                provenValue(m_node->child2()))));
        m_out.jump(continuation);

        m_out.appendTo(is16Bit, isLeadSurrogate);
        LValue leadCharacter = m_out.load16ZeroExt32(m_out.baseIndex(m_heaps.characters16, storage, m_out.zeroExtPtr(index), provenValue(m_node->child2())));
        ValueFromBlock char16Bit = m_out.anchor(leadCharacter);
        LValue nextIndex = m_out.add(index, m_out.int32One);
        // If there is no following unit, the lone unit is returned as-is.
        m_out.branch(m_out.aboveOrEqual(nextIndex, length), unsure(continuation), unsure(isLeadSurrogate));

        m_out.appendTo(isLeadSurrogate, mayHaveTrailSurrogate);
        // (c & 0xfffffc00) == 0xd800 tests the lead-surrogate range D800-DBFF.
        m_out.branch(m_out.notEqual(m_out.bitAnd(leadCharacter, m_out.constInt32(0xfffffc00)), m_out.constInt32(0xd800)), unsure(continuation), unsure(mayHaveTrailSurrogate));

        m_out.appendTo(mayHaveTrailSurrogate, hasTrailSurrogate);
        // If the index is a known constant, fold index + 1 to help baseIndex.
        JSValue indexValue = provenValue(m_node->child2());
        JSValue nextIndexValue;
        if (indexValue && indexValue.isInt32() && indexValue.asInt32() != INT32_MAX)
            nextIndexValue = jsNumber(indexValue.asInt32() + 1);
        LValue trailCharacter = m_out.load16ZeroExt32(m_out.baseIndex(m_heaps.characters16, storage, m_out.zeroExtPtr(nextIndex), nextIndexValue));
        // (c & 0xfffffc00) == 0xdc00 tests the trail-surrogate range DC00-DFFF.
        m_out.branch(m_out.notEqual(m_out.bitAnd(trailCharacter, m_out.constInt32(0xfffffc00)), m_out.constInt32(0xdc00)), unsure(continuation), unsure(hasTrailSurrogate));

        m_out.appendTo(hasTrailSurrogate, continuation);
        // Combine: (lead << 10) + trail - U16_SURROGATE_OFFSET.
        ValueFromBlock charSurrogatePair = m_out.anchor(m_out.sub(m_out.add(m_out.shl(leadCharacter, m_out.constInt32(10)), trailCharacter), m_out.constInt32(U16_SURROGATE_OFFSET)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        // We have to keep base alive since that keeps storage alive.
        ensureStillAliveHere(base);
        setInt32(m_out.phi(Int32, char8Bit, char16Bit, charSurrogatePair));
    }
8773
8774 void compileStringFromCharCode()
8775 {
8776 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
8777 Edge childEdge = m_node->child1();
8778
8779 if (childEdge.useKind() == UntypedUse) {
8780 LValue result = vmCall(
8781 Int64, operationStringFromCharCodeUntyped, weakPointer(globalObject),
8782 lowJSValue(childEdge));
8783 setJSValue(result);
8784 return;
8785 }
8786
8787 DFG_ASSERT(m_graph, m_node, childEdge.useKind() == Int32Use, childEdge.useKind());
8788
8789 LValue value = lowInt32(childEdge);
8790
8791 LBasicBlock smallIntCase = m_out.newBlock();
8792 LBasicBlock slowCase = m_out.newBlock();
8793 LBasicBlock continuation = m_out.newBlock();
8794
8795 m_out.branch(
8796 m_out.above(value, m_out.constInt32(maxSingleCharacterString)),
8797 rarely(slowCase), usually(smallIntCase));
8798
8799 LBasicBlock lastNext = m_out.appendTo(smallIntCase, slowCase);
8800
8801 LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());
8802 LValue fastResultValue = m_out.loadPtr(
8803 m_out.baseIndex(m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(value)));
8804 ValueFromBlock fastResult = m_out.anchor(fastResultValue);
8805 m_out.jump(continuation);
8806
8807 m_out.appendTo(slowCase, continuation);
8808
8809 LValue slowResultValue = vmCall(
8810 pointerType(), operationStringFromCharCode, weakPointer(globalObject), value);
8811 ValueFromBlock slowResult = m_out.anchor(slowResultValue);
8812 m_out.jump(continuation);
8813
8814 m_out.appendTo(continuation, lastNext);
8815
8816 setJSValue(m_out.phi(Int64, fastResult, slowResult));
8817 }
8818
8819 void compileGetByOffset()
8820 {
8821 StorageAccessData& data = m_node->storageAccessData();
8822
8823 LValue base = lowCell(m_node->child2());
8824 LValue value = loadProperty(lowStorage(m_node->child1()), data.identifierNumber, data.offset);
8825 // We have to keep base alive since that keeps content of storage alive.
8826 ensureStillAliveHere(base);
8827 setJSValue(value);
8828 }
8829
8830 void compileGetGetter()
8831 {
8832 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.GetterSetter_getter));
8833 }
8834
8835 void compileGetSetter()
8836 {
8837 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.GetterSetter_setter));
8838 }
8839
    // Lowers MultiGetByOffset: a structure-switched property read. Each case
    // covers a set of structures and says how to produce the value: a known
    // constant, a load from the base, or a load from a prototype.
    void compileMultiGetByOffset()
    {
        LValue base = lowCell(m_node->child1());

        MultiGetByOffsetData& data = m_node->multiGetByOffsetData();

        Vector<LBasicBlock, 2> blocks(data.cases.size());
        for (unsigned i = data.cases.size(); i--;)
            blocks[i] = m_out.newBlock();
        LBasicBlock exit = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // One switch case per (structure, case) pair; every structure of a
        // case shares that case's block.
        Vector<SwitchCase, 2> cases;
        RegisteredStructureSet baseSet;
        for (unsigned i = data.cases.size(); i--;) {
            MultiGetByOffsetCase getCase = data.cases[i];
            for (unsigned j = getCase.set().size(); j--;) {
                RegisteredStructure structure = getCase.set()[j];
                baseSet.add(structure);
                cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1)));
            }
        }
        // If abstract interpretation already proves the base's structure is in
        // the covered set, the exit block needs no speculation check.
        bool structuresChecked = m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet);
        emitSwitchForMultiByOffset(base, structuresChecked, cases, exit);

        LBasicBlock lastNext = m_out.m_nextBlock;

        Vector<ValueFromBlock, 2> results;
        for (unsigned i = data.cases.size(); i--;) {
            MultiGetByOffsetCase getCase = data.cases[i];
            GetByOffsetMethod method = getCase.method();

            m_out.appendTo(blocks[i], i + 1 < data.cases.size() ? blocks[i + 1] : exit);

            LValue result;

            switch (method.kind()) {
            case GetByOffsetMethod::Invalid:
                RELEASE_ASSERT_NOT_REACHED();
                break;

            case GetByOffsetMethod::Constant:
                // The value is a compile-time constant for this structure set.
                result = m_out.constInt64(JSValue::encode(method.constant()->value()));
                break;

            case GetByOffsetMethod::Load:
            case GetByOffsetMethod::LoadFromPrototype: {
                LValue propertyBase;
                if (method.kind() == GetByOffsetMethod::Load)
                    propertyBase = base;
                else
                    propertyBase = weakPointer(method.prototype()->value().asCell());
                // Out-of-line properties live in the butterfly.
                if (!isInlineOffset(method.offset()))
                    propertyBase = m_out.loadPtr(propertyBase, m_heaps.JSObject_butterfly);
                result = loadProperty(
                    propertyBase, data.identifierNumber, method.offset());
                break;
            } }

            results.append(m_out.anchor(result));
            m_out.jump(continuation);
        }

        // Unmatched structure: OSR exit (unless the set was proven complete).
        m_out.appendTo(exit, continuation);
        if (!structuresChecked)
            speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
        m_out.unreachable();

        m_out.appendTo(continuation, lastNext);
        // We have to keep base alive since that keeps storage alive.
        ensureStillAliveHere(base);
        setJSValue(m_out.phi(Int64, results));
    }
8913
8914 void compilePutByOffset()
8915 {
8916 StorageAccessData& data = m_node->storageAccessData();
8917
8918 storeProperty(
8919 lowJSValue(m_node->child3()),
8920 lowStorage(m_node->child1()), data.identifierNumber, data.offset);
8921 }
8922
    // Lowers MultiPutByOffset: a structure-switched property store. Replace
    // variants store into existing storage; Transition variants (re)allocate
    // storage as needed and then install the new structure.
    void compileMultiPutByOffset()
    {
        LValue base = lowCell(m_node->child1());
        LValue value = lowJSValue(m_node->child2());

        MultiPutByOffsetData& data = m_node->multiPutByOffsetData();

        Vector<LBasicBlock, 2> blocks(data.variants.size());
        for (unsigned i = data.variants.size(); i--;)
            blocks[i] = m_out.newBlock();
        LBasicBlock exit = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // One switch case per (old structure, variant) pair.
        Vector<SwitchCase, 2> cases;
        RegisteredStructureSet baseSet;
        for (unsigned i = data.variants.size(); i--;) {
            PutByIdVariant variant = data.variants[i];
            for (unsigned j = variant.oldStructure().size(); j--;) {
                RegisteredStructure structure = m_graph.registerStructure(variant.oldStructure()[j]);
                baseSet.add(structure);
                cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1)));
            }
        }
        // If the base's structure is proven covered, the exit block needs no
        // speculation check.
        bool structuresChecked = m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet);
        emitSwitchForMultiByOffset(base, structuresChecked, cases, exit);

        LBasicBlock lastNext = m_out.m_nextBlock;

        for (unsigned i = data.variants.size(); i--;) {
            m_out.appendTo(blocks[i], i + 1 < data.variants.size() ? blocks[i + 1] : exit);

            PutByIdVariant variant = data.variants[i];

            LValue storage;
            if (variant.kind() == PutByIdVariant::Replace) {
                // Replace: the slot already exists, inline or in the butterfly.
                if (isInlineOffset(variant.offset()))
                    storage = base;
                else
                    storage = m_out.loadPtr(base, m_heaps.JSObject_butterfly);
            } else {
                DFG_ASSERT(m_graph, m_node, variant.kind() == PutByIdVariant::Transition, variant.kind());
                // Record the transition so the plan can validate it before the
                // code is installed.
                m_graph.m_plan.transitions().addLazily(
                    m_origin.semantic.codeOriginOwner(),
                    variant.oldStructureForTransition(), variant.newStructure());

                // May allocate or grow the butterfly for the new slot.
                storage = storageForTransition(
                    base, variant.offset(),
                    variant.oldStructureForTransition(), variant.newStructure());
            }

            storeProperty(value, storage, data.identifierNumber, variant.offset());

            if (variant.kind() == PutByIdVariant::Transition) {
                // These invariants let us switch the structure with a bare
                // store32 of the structure ID, after the property store.
                ASSERT(variant.oldStructureForTransition()->indexingType() == variant.newStructure()->indexingType());
                ASSERT(variant.oldStructureForTransition()->typeInfo().inlineTypeFlags() == variant.newStructure()->typeInfo().inlineTypeFlags());
                ASSERT(variant.oldStructureForTransition()->typeInfo().type() == variant.newStructure()->typeInfo().type());
                m_out.store32(
                    weakStructureID(m_graph.registerStructure(variant.newStructure())), base, m_heaps.JSCell_structureID);
            }

            m_out.jump(continuation);
        }

        // Unmatched structure: OSR exit (unless the set was proven complete).
        m_out.appendTo(exit, continuation);
        if (!structuresChecked)
            speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
        m_out.unreachable();

        m_out.appendTo(continuation, lastNext);
    }
8993
    // Lowers MultiDeleteByOffset: a structure-switched property delete.
    // Variants with a new structure clear the slot and transition; variants
    // without one are misses that just produce true (configurable) or false
    // (non-configurable). All true-misses share one block and all
    // false-misses share another, which is what the uniqueCaseCount
    // arithmetic below accounts for.
    void compileMultiDeleteByOffset()
    {
        LValue base = lowCell(m_node->child1());
        MultiDeleteByOffsetData& data = m_node->multiDeleteByOffsetData();

        // Count the miss variants of each flavor so they can be deduplicated.
        unsigned missConfigurable = 0;
        unsigned missNonconfigurable = 0;

        for (unsigned i = data.variants.size(); i--;) {
            DeleteByVariant variant = data.variants[i];
            if (!variant.newStructure()) {
                if (variant.result())
                    ++missConfigurable;
                else
                    ++missNonconfigurable;
            }
        }

        // Collapse each miss flavor down to a single shared block; trueBlock/
        // falseBlock are their indices into `blocks` (-1 if absent).
        unsigned uniqueCaseCount = data.variants.size();
        if (missConfigurable)
            uniqueCaseCount -= missConfigurable - 1;
        if (missNonconfigurable)
            uniqueCaseCount -= missNonconfigurable - 1;
        int trueBlock = missConfigurable ? uniqueCaseCount - 1 : -1;
        int falseBlock = missNonconfigurable ? uniqueCaseCount - 1 - !!missConfigurable : -1;

        Vector<LBasicBlock, 2> blocks(uniqueCaseCount);
        for (unsigned i = blocks.size(); i--;)
            blocks[i] = m_out.newBlock();
        LBasicBlock exit = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Transitioning variants each get their own block; miss variants
        // route to the shared true/false block.
        Vector<SwitchCase, 2> cases;
        RegisteredStructureSet baseSet;
        for (unsigned i = data.variants.size(), block = 0; i--;) {
            DeleteByVariant variant = data.variants[i];
            RegisteredStructure structure = m_graph.registerStructure(variant.oldStructure());
            baseSet.add(structure);
            if (variant.newStructure())
                cases.append(SwitchCase(weakStructureID(structure), blocks[block++], Weight(1)));
            else
                cases.append(SwitchCase(weakStructureID(structure), blocks[variant.result() ? trueBlock : falseBlock], Weight(1)));
        }
        bool structuresChecked = m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet);
        emitSwitchForMultiByOffset(base, structuresChecked, cases, exit);

        LBasicBlock lastNext = m_out.m_nextBlock;

        Vector<ValueFromBlock, 2> results;

        for (unsigned i = data.variants.size(), block = 0; i--;) {
            DeleteByVariant variant = data.variants[i];
            if (!variant.newStructure())
                continue;

            m_out.appendTo(blocks[block], block + 1 < blocks.size() ? blocks[block + 1] : exit);

            LValue storage;

            if (isInlineOffset(variant.offset()))
                storage = base;
            else
                storage = m_out.loadPtr(base, m_heaps.JSObject_butterfly);

            // Clear the slot, then install the new structure.
            storeProperty(m_out.int64Zero, storage, data.identifierNumber, variant.offset());

            // These invariants let us switch structures with a bare store32
            // of the structure ID.
            ASSERT(variant.oldStructure()->indexingType() == variant.newStructure()->indexingType());
            ASSERT(variant.oldStructure()->typeInfo().inlineTypeFlags() == variant.newStructure()->typeInfo().inlineTypeFlags());
            ASSERT(variant.oldStructure()->typeInfo().type() == variant.newStructure()->typeInfo().type());
            m_out.store32(
                weakStructureID(m_graph.registerStructure(variant.newStructure())), base, m_heaps.JSCell_structureID);

            results.append(m_out.anchor(variant.result() ? m_out.booleanTrue : m_out.booleanFalse));
            m_out.jump(continuation);
            ++block;
        }

        if (missNonconfigurable) {
            m_out.appendTo(blocks[falseBlock]);
            results.append(m_out.anchor(m_out.booleanFalse));
            m_out.jump(continuation);
        }

        if (missConfigurable) {
            m_out.appendTo(blocks[trueBlock], exit);
            results.append(m_out.anchor(m_out.booleanTrue));
            m_out.jump(continuation);
        }

        // Unmatched structure: OSR exit (unless the set was proven complete).
        m_out.appendTo(exit, continuation);
        if (!structuresChecked)
            speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
        m_out.unreachable();

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, results));

        // If any variant transitions, tell the alias analysis that this node
        // writes the cell header / named-property heaps.
        if (data.writesStructures()) {
            PatchpointValue* patchpoint = m_out.patchpoint(Void);
            patchpoint->setGenerator([] (CCallHelpers&, const StackmapGenerationParams&) { });
            m_heaps.decoratePatchpointWrite(&m_heaps.JSCellHeaderAndNamedProperties, patchpoint);
        }
    }
9097
    // Lowers MatchStructure: switch on the base's structure and produce the
    // boolean each variant dictates; unmatched structures OSR exit unless the
    // structure set is already proven complete.
    void compileMatchStructure()
    {
        LValue base = lowCell(m_node->child1());

        MatchStructureData& data = m_node->matchStructureData();

        LBasicBlock trueBlock = m_out.newBlock();
        LBasicBlock falseBlock = m_out.newBlock();
        LBasicBlock exitBlock = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(trueBlock);

        // All true-variants share one block and all false-variants the other.
        Vector<SwitchCase, 2> cases;
        RegisteredStructureSet baseSet;
        for (MatchStructureVariant& variant : data.variants) {
            baseSet.add(variant.structure);
            cases.append(SwitchCase(
                weakStructureID(variant.structure),
                variant.result ? trueBlock : falseBlock, Weight(1)));
        }
        bool structuresChecked = m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet);
        emitSwitchForMultiByOffset(base, structuresChecked, cases, exitBlock);

        m_out.appendTo(trueBlock, falseBlock);
        ValueFromBlock trueResult = m_out.anchor(m_out.booleanTrue);
        m_out.jump(continuation);

        m_out.appendTo(falseBlock, exitBlock);
        ValueFromBlock falseResult = m_out.anchor(m_out.booleanFalse);
        m_out.jump(continuation);

        m_out.appendTo(exitBlock, continuation);
        if (!structuresChecked)
            speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
        m_out.unreachable();

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, trueResult, falseResult));
    }
9138
9139 void compileGetGlobalVariable()
9140 {
9141 setJSValue(m_out.load64(m_out.absolute(m_node->variablePointer())));
9142 }
9143
9144 void compilePutGlobalVariable()
9145 {
9146 m_out.store64(
9147 lowJSValue(m_node->child2()), m_out.absolute(m_node->variablePointer()));
9148 }
9149
    // Lowers NotifyWrite: if the watchpoint set is not already invalidated,
    // call operationNotifyWrite out-of-line to fire/invalidate it.
    void compileNotifyWrite()
    {
        WatchpointSet* set = m_node->watchpointSet();

        LBasicBlock isNotInvalidated = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Fast path: an invalidated set needs no notification.
        LValue state = m_out.load8ZeroExt32(m_out.absolute(set->addressOfState()));
        m_out.branch(
            m_out.equal(state, m_out.constInt32(IsInvalidated)),
            usually(continuation), rarely(isNotInvalidated));

        LBasicBlock lastNext = m_out.appendTo(isNotInvalidated, continuation);

        // Out-of-line call; no result register is needed (InvalidGPRReg).
        VM& vm = this->vm();
        lazySlowPath(
            [=, &vm] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationNotifyWrite, InvalidGPRReg, &vm, CCallHelpers::TrustedImmPtr(set));
            });
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
9174
9175 void compileGetCallee()
9176 {
9177 setJSValue(m_out.loadPtr(addressFor(VirtualRegister(CallFrameSlot::callee))));
9178 }
9179
9180 void compileSetCallee()
9181 {
9182 auto callee = lowCell(m_node->child1());
9183 m_out.storePtr(callee, payloadFor(VirtualRegister(CallFrameSlot::callee)));
9184 }
9185
9186 void compileGetArgumentCountIncludingThis()
9187 {
9188 VirtualRegister argumentCountRegister;
9189 if (InlineCallFrame* inlineCallFrame = m_node->argumentsInlineCallFrame())
9190 argumentCountRegister = inlineCallFrame->argumentCountRegister;
9191 else
9192 argumentCountRegister = VirtualRegister(CallFrameSlot::argumentCountIncludingThis);
9193 setInt32(m_out.load32(payloadFor(argumentCountRegister)));
9194 }
9195
9196 void compileSetArgumentCountIncludingThis()
9197 {
9198 m_out.store32(m_out.constInt32(m_node->argumentCountIncludingThis()), payloadFor(VirtualRegister(CallFrameSlot::argumentCountIncludingThis)));
9199 }
9200
9201 void compileGetScope()
9202 {
9203 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSFunction_scope));
9204 }
9205
9206 void compileSkipScope()
9207 {
9208 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSScope_next));
9209 }
9210
9211 void compileGetGlobalObject()
9212 {
9213 LValue structure = loadStructure(lowCell(m_node->child1()));
9214 setJSValue(m_out.loadPtr(structure, m_heaps.Structure_globalObject));
9215 }
9216
9217 void compileGetGlobalThis()
9218 {
9219 auto* globalObject = m_graph.globalObjectFor(m_origin.semantic);
9220 setJSValue(m_out.loadPtr(m_out.absolute(globalObject->addressOfGlobalThis())));
9221 }
9222
9223 void compileGetClosureVar()
9224 {
9225 setJSValue(
9226 m_out.load64(
9227 lowCell(m_node->child1()),
9228 m_heaps.JSLexicalEnvironment_variables[m_node->scopeOffset().offset()]));
9229 }
9230
9231 void compilePutClosureVar()
9232 {
9233 m_out.store64(
9234 lowJSValue(m_node->child2()),
9235 lowCell(m_node->child1()),
9236 m_heaps.JSLexicalEnvironment_variables[m_node->scopeOffset().offset()]);
9237 }
9238
9239 void compileGetInternalField()
9240 {
9241 setJSValue(
9242 m_out.load64(
9243 lowCell(m_node->child1()),
9244 m_heaps.JSInternalFieldObjectImpl_internalFields[m_node->internalFieldIndex()]));
9245 }
9246
9247 void compilePutInternalField()
9248 {
9249 m_out.store64(
9250 lowJSValue(m_node->child2()),
9251 lowCell(m_node->child1()),
9252 m_heaps.JSInternalFieldObjectImpl_internalFields[m_node->internalFieldIndex()]);
9253 }
9254
9255 void compileGetFromArguments()
9256 {
9257 setJSValue(
9258 m_out.load64(
9259 lowCell(m_node->child1()),
9260 m_heaps.DirectArguments_storage[m_node->capturedArgumentsOffset().offset()]));
9261 }
9262
9263 void compilePutToArguments()
9264 {
9265 m_out.store64(
9266 lowJSValue(m_node->child2()),
9267 lowCell(m_node->child1()),
9268 m_heaps.DirectArguments_storage[m_node->capturedArgumentsOffset().offset()]);
9269 }
9270
    // Lowers GetArgument: load the requested argument from the frame, or
    // produce undefined if fewer arguments were passed. Note that
    // argumentIndex() is 1-based here (see the "- 1" below), and the bounds
    // check is argumentCount <= argumentIndex.
    void compileGetArgument()
    {
        LValue argumentCount = m_out.load32(payloadFor(AssemblyHelpers::argumentCount(m_origin.semantic)));

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock outOfBounds = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(m_out.lessThanOrEqual(argumentCount, m_out.constInt32(m_node->argumentIndex())), unsure(outOfBounds), unsure(inBounds));

        LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
        VirtualRegister arg = AssemblyHelpers::argumentsStart(m_origin.semantic) + m_node->argumentIndex() - 1;
        ValueFromBlock inBoundsResult = m_out.anchor(m_out.load64(addressFor(arg)));
        m_out.jump(continuation);

        m_out.appendTo(outOfBounds, continuation);
        ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(JSValue::ValueUndefined));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
    }
9293
    // Lowers CompareEq (loose ==). Typed use-kind pairs for which loose and
    // strict equality coincide are forwarded to compileCompareStrictEq;
    // object-vs-object-or-other and known-null/undefined operands get
    // dedicated paths; everything else takes the generic comparison with
    // operationCompareEq as the slow path.
    void compileCompareEq()
    {
        if (m_node->isBinaryUseKind(Int32Use)
            || m_node->isBinaryUseKind(Int52RepUse)
            || m_node->isBinaryUseKind(DoubleRepUse)
            || m_node->isBinaryUseKind(ObjectUse)
            || m_node->isBinaryUseKind(BooleanUse)
            || m_node->isBinaryUseKind(SymbolUse)
            || m_node->isBinaryUseKind(StringIdentUse)
            || m_node->isBinaryUseKind(StringUse)
            || m_node->isBinaryUseKind(BigInt32Use)
            || m_node->isBinaryUseKind(HeapBigIntUse)
            || m_node->isBinaryUseKind(AnyBigIntUse)) {
            // For these typed pairs, == behaves exactly like ===.
            compileCompareStrictEq();
            return;
        }

        // Object vs object-or-other, in either operand order; the helper
        // takes the object-or-other edge first.
        if (m_node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
            compareEqObjectOrOtherToObject(m_node->child2(), m_node->child1());
            return;
        }

        if (m_node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
            compareEqObjectOrOtherToObject(m_node->child1(), m_node->child2());
            return;
        }

        // One side proven null/undefined: reduces to an equalNullOrUndefined
        // test of the other side.
        if (m_node->child1().useKind() == KnownOtherUse) {
            ASSERT(!m_interpreter.needsTypeCheck(m_node->child1(), SpecOther));
            setBoolean(equalNullOrUndefined(m_node->child2(), AllCellsAreFalse, EqualNullOrUndefined, ManualOperandSpeculation));
            return;
        }

        if (m_node->child2().useKind() == KnownOtherUse) {
            ASSERT(!m_interpreter.needsTypeCheck(m_node->child2(), SpecOther));
            setBoolean(equalNullOrUndefined(m_node->child1(), AllCellsAreFalse, EqualNullOrUndefined, ManualOperandSpeculation));
            return;
        }

        // Fully untyped: fast path is a raw 64-bit compare, slow path calls
        // the runtime.
        DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse), m_node->child1().useKind(), m_node->child2().useKind());
        genericJSValueCompare(
            [&] (LValue left, LValue right) {
                return m_out.equal(left, right);
            },
            operationCompareEq);
    }
9340
    void compileCompareStrictEq()
    {
        // Strict equality (===), specialized per use-kind pair. Each case either
        // compares lowered values directly, builds a small control-flow diamond,
        // or falls through to the generic JSValue comparison at the bottom.
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        if (m_node->isBinaryUseKind(Int32Use)) {
            setBoolean(
                m_out.equal(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
            return;
        }

#if USE(BIGINT32)
        if (m_node->isBinaryUseKind(BigInt32Use)) {
            LValue left = lowBigInt32(m_node->child1());
            LValue right = lowBigInt32(m_node->child2());

            // No need to unbox since the tag bits are the same on both sides
            LValue result = m_out.equal(left, right);
            setBoolean(result);
            return;
        }

        if (m_node->isBinaryUseKind(AnyBigIntUse)) {
            LValue left = lowJSValue(m_node->child1(), ManualOperandSpeculation);
            LValue right = lowJSValue(m_node->child2(), ManualOperandSpeculation);

            // Note that we cannot start with if (left == right), because we must insert the right checks (see ManualOperandSpeculation above)
            // The code that we generate looks like the following pseudo-code:
            /*
             if (isBigInt32(left)) {
                 if (isBigInt32(right))
                     return left == right;
                 CHECK(isHeapBigInt(right));
                 return call(JSBigInt::equalsToInt32(right, unboxed(left));
             }
             CHECK(isHeapBigInt(left))
             if (left == right)
                 return true;
             if (isBigInt32(right))
                 return call(JSBigInt::equalsToInt32(left, unboxed(right));
             CHECK(isHeapBigInt(right));
             return call(JSBigInt::equals(left, right));
             */
            LBasicBlock leftIsBigInt32 = m_out.newBlock();
            LBasicBlock bothAreBigInt32 = m_out.newBlock();
            LBasicBlock onlyLeftIsBigInt32 = m_out.newBlock();
            LBasicBlock leftIsNotBigInt32 = m_out.newBlock();
            LBasicBlock leftEqualsRight = m_out.newBlock();
            LBasicBlock leftIsHeapBigInt = m_out.newBlock();
            LBasicBlock rightIsBigInt32 = m_out.newBlock();
            LBasicBlock rightIsNotBigInt32 = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Inserts a check that a value is a HeapBigInt, assuming only that we know it is not a BigInt32
            auto checkIsHeapBigInt = [&](LValue lowValue, Edge highValue) {
                if (m_interpreter.needsTypeCheck(highValue, SpecHeapBigInt)) {
                    ASSERT(mayHaveTypeCheck(highValue.useKind()));
                    LValue checkFailed = isNotHeapBigIntUnknownWhetherCell(lowValue, provenType(highValue) & ~SpecBigInt32);
                    appendOSRExit(BadType, jsValueValue(lowValue), highValue.node(), checkFailed, m_origin);
                }
            };

            m_out.branch(isBigInt32(left, provenType(m_node->child1())), unsure(leftIsBigInt32), unsure(leftIsNotBigInt32));

            LBasicBlock lastNext = m_out.appendTo(leftIsBigInt32, bothAreBigInt32);
            m_out.branch(isBigInt32(right, provenType(m_node->child2())), unsure(bothAreBigInt32), unsure(onlyLeftIsBigInt32));

            m_out.appendTo(bothAreBigInt32, onlyLeftIsBigInt32);
            // Two BigInt32s compare equal iff their boxed bits are equal.
            ValueFromBlock resultBothAreBigInt32 = m_out.anchor(m_out.equal(left, right));
            m_out.jump(continuation);

            m_out.appendTo(onlyLeftIsBigInt32, leftIsNotBigInt32);
            checkIsHeapBigInt(right, m_node->child2());
            LValue unboxedLeft = unboxBigInt32(left);
            ValueFromBlock resultLeftIsBigInt32 = m_out.anchor(m_out.notNull(vmCall(pointerType(), operationCompareEqHeapBigIntToInt32, weakPointer(globalObject), right, unboxedLeft)));
            m_out.jump(continuation);

            m_out.appendTo(leftIsNotBigInt32, leftEqualsRight);
            checkIsHeapBigInt(left, m_node->child1());
            // Identical heap cells are trivially equal; avoids the slow call.
            m_out.branch(m_out.equal(left, right), unsure(leftEqualsRight), unsure(leftIsHeapBigInt));

            m_out.appendTo(leftEqualsRight, leftIsHeapBigInt);
            ValueFromBlock resultLeftEqualsRight = m_out.anchor(m_out.booleanTrue);
            m_out.jump(continuation);

            m_out.appendTo(leftIsHeapBigInt, rightIsBigInt32);
            m_out.branch(isBigInt32(right, provenType(m_node->child2())), unsure(rightIsBigInt32), unsure(rightIsNotBigInt32));

            m_out.appendTo(rightIsBigInt32, rightIsNotBigInt32);
            LValue unboxedRight = unboxBigInt32(right);
            ValueFromBlock resultRightIsBigInt32 = m_out.anchor(m_out.notNull(vmCall(pointerType(), operationCompareEqHeapBigIntToInt32, weakPointer(globalObject), left, unboxedRight)));
            m_out.jump(continuation);

            m_out.appendTo(rightIsNotBigInt32, continuation);
            checkIsHeapBigInt(right, m_node->child2());
            // FIXME: [ESNext][BigInt] Create specialized version of strict equals for big ints
            // https://bugs.webkit.org/show_bug.cgi?id=182895
            ValueFromBlock resultBothHeapBigInt = m_out.anchor(m_out.notNull(vmCall(pointerType(), operationCompareStrictEq, weakPointer(globalObject), left, right)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, resultBothAreBigInt32, resultLeftIsBigInt32, resultLeftEqualsRight, resultRightIsBigInt32, resultBothHeapBigInt));

            // The checks above proved both operands are BigInts; tell the AI.
            m_interpreter.filter(m_node->child1(), SpecBigInt);
            m_interpreter.filter(m_node->child2(), SpecBigInt);
            return;
        }
#endif // USE(BIGINT32)

        if (m_node->isBinaryUseKind(Int52RepUse)) {
            // Lower both sides in the same Int52 representation so the raw
            // compare is meaningful.
            Int52Kind kind;
            LValue left = lowWhicheverInt52(m_node->child1(), kind);
            LValue right = lowInt52(m_node->child2(), kind);
            setBoolean(m_out.equal(left, right));
            return;
        }

        if (m_node->isBinaryUseKind(DoubleRepUse)) {
            setBoolean(
                m_out.doubleEqual(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(StringIdentUse)) {
            // Atomized string impls compare by pointer identity.
            setBoolean(
                m_out.equal(lowStringIdent(m_node->child1()), lowStringIdent(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(StringUse)) {
            LValue left = lowCell(m_node->child1());
            LValue right = lowCell(m_node->child2());

            LBasicBlock notTriviallyEqualCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            speculateString(m_node->child1(), left);

            // Same cell => trivially equal; otherwise do a content comparison.
            ValueFromBlock fastResult = m_out.anchor(m_out.booleanTrue);
            m_out.branch(
                m_out.equal(left, right), unsure(continuation), unsure(notTriviallyEqualCase));

            LBasicBlock lastNext = m_out.appendTo(notTriviallyEqualCase, continuation);

            // The right-side string speculation only needs to happen on the
            // non-identical path; identical cells already passed the left check.
            speculateString(m_node->child2(), right);

            ValueFromBlock slowResult = m_out.anchor(stringsEqual(left, right, m_node->child1(), m_node->child2()));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, fastResult, slowResult));
            return;
        }

        // Object vs. untyped: an object only strict-equals the same cell, so a
        // raw bit compare suffices.
        if (m_node->isBinaryUseKind(ObjectUse, UntypedUse)) {
            setBoolean(
                m_out.equal(
                    lowNonNullObject(m_node->child1()),
                    lowJSValue(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(UntypedUse, ObjectUse)) {
            setBoolean(
                m_out.equal(
                    lowNonNullObject(m_node->child2()),
                    lowJSValue(m_node->child1())));
            return;
        }

        if (m_node->isBinaryUseKind(ObjectUse)) {
            setBoolean(
                m_out.equal(
                    lowNonNullObject(m_node->child1()),
                    lowNonNullObject(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(BooleanUse)) {
            setBoolean(
                m_out.equal(lowBoolean(m_node->child1()), lowBoolean(m_node->child2())));
            return;
        }

        if (m_node->isBinaryUseKind(SymbolUse)) {
            // Symbols compare by cell identity.
            LValue leftSymbol = lowSymbol(m_node->child1());
            LValue rightSymbol = lowSymbol(m_node->child2());
            setBoolean(m_out.equal(leftSymbol, rightSymbol));
            return;
        }

        if (m_node->isBinaryUseKind(HeapBigIntUse)) {
            // FIXME: [ESNext][BigInt] Create specialized version of strict equals for big ints
            // https://bugs.webkit.org/show_bug.cgi?id=182895
            LValue left = lowHeapBigInt(m_node->child1());
            LValue right = lowHeapBigInt(m_node->child2());

            LBasicBlock notTriviallyEqualCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Identity fast path; distinct cells need a value comparison call.
            ValueFromBlock fastResult = m_out.anchor(m_out.booleanTrue);
            m_out.branch(m_out.equal(left, right), rarely(continuation), usually(notTriviallyEqualCase));

            LBasicBlock lastNext = m_out.appendTo(notTriviallyEqualCase, continuation);

            ValueFromBlock slowResult = m_out.anchor(m_out.notNull(vmCall(
                pointerType(), operationCompareStrictEq, weakPointer(globalObject), left, right)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, fastResult, slowResult));
            return;
        }

        if (m_node->isBinaryUseKind(SymbolUse, UntypedUse)
            || m_node->isBinaryUseKind(UntypedUse, SymbolUse)) {
            // Normalize so symbolEdge really is the symbol-typed side.
            Edge symbolEdge = m_node->child1();
            Edge untypedEdge = m_node->child2();
            if (symbolEdge.useKind() != SymbolUse)
                std::swap(symbolEdge, untypedEdge);

            LValue leftSymbol = lowSymbol(symbolEdge);
            LValue untypedValue = lowJSValue(untypedEdge);

            // A symbol only strict-equals the identical cell, so bit equality works.
            setBoolean(m_out.equal(leftSymbol, untypedValue));
            return;
        }

        if (m_node->isBinaryUseKind(MiscUse, UntypedUse)
            || m_node->isBinaryUseKind(UntypedUse, MiscUse)
#if !USE(BIGINT32)
            || m_node->isBinaryUseKind(NotDoubleUse, NeitherDoubleNorHeapBigIntNorStringUse)
            || m_node->isBinaryUseKind(NeitherDoubleNorHeapBigIntNorStringUse, NotDoubleUse)) {
#else
            ) {
#endif
            // For these use-kind pairs, strict equality of the boxed bits is the
            // full answer once the speculations have been emitted.
            speculate(m_node->child1());
            speculate(m_node->child2());
            LValue left = lowJSValue(m_node->child1(), ManualOperandSpeculation);
            LValue right = lowJSValue(m_node->child2(), ManualOperandSpeculation);
            setBoolean(m_out.equal(left, right));
            return;
        }

        if (m_node->isBinaryUseKind(StringIdentUse, NotStringVarUse)
            || m_node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) {
            Edge leftEdge = m_node->childFor(StringIdentUse);
            Edge rightEdge = m_node->childFor(NotStringVarUse);

            LValue left = lowStringIdent(leftEdge);
            LValue rightValue = lowJSValue(rightEdge, ManualOperandSpeculation);

            LBasicBlock isCellCase = m_out.newBlock();
            LBasicBlock isStringCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Non-cell right operand can never equal a string ident.
            ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
            m_out.branch(
                isCell(rightValue, provenType(rightEdge)),
                unsure(isCellCase), unsure(continuation));

            LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
            // A cell that isn't a string can't equal a string ident either.
            ValueFromBlock notStringResult = m_out.anchor(m_out.booleanFalse);
            m_out.branch(
                isString(rightValue, provenType(rightEdge)),
                unsure(isStringCase), unsure(continuation));

            m_out.appendTo(isStringCase, continuation);
            // Compare the atomized impl pointers after speculating the right
            // side into ident form.
            LValue right = m_out.loadPtr(rightValue, m_heaps.JSString_value);
            speculateStringIdent(rightEdge, rightValue, right);
            ValueFromBlock isStringResult = m_out.anchor(m_out.equal(left, right));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, notCellResult, notStringResult, isStringResult));
            return;
        }

        if (m_node->isBinaryUseKind(StringUse, UntypedUse)) {
            compileStringToUntypedStrictEquality(m_node->child1(), m_node->child2());
            return;
        }
        if (m_node->isBinaryUseKind(UntypedUse, StringUse)) {
            compileStringToUntypedStrictEquality(m_node->child2(), m_node->child1());
            return;
        }

        // FIXME: we can do something much smarter here, see the DFGSpeculativeJIT approach in e.g. SpeculativeJIT::nonSpeculativePeepholeStrictEq
        DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse), m_node->child1().useKind(), m_node->child2().useKind());
        genericJSValueCompare(
            [&] (LValue left, LValue right) {
                return m_out.equal(left, right);
            },
            operationCompareStrictEq);
    }
9634
    // Lowers string === untyped: speculates the string side, then answers false
    // quickly when the untyped side cannot possibly be an equal string, falling
    // back to a full string content comparison otherwise.
    void compileStringToUntypedStrictEquality(Edge stringEdge, Edge untypedEdge)
    {
        ASSERT(stringEdge.useKind() == StringUse);
        ASSERT(untypedEdge.useKind() == UntypedUse);

        LValue leftString = lowCell(stringEdge);
        LValue rightValue = lowJSValue(untypedEdge);
        SpeculatedType rightValueType = provenType(untypedEdge);

        // Verify left is string.
        speculateString(stringEdge, leftString);

        LBasicBlock testUntypedEdgeIsCell = m_out.newBlock();
        LBasicBlock testUntypedEdgeIsString = m_out.newBlock();
        LBasicBlock testStringEquality = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Given left is string. If the value are strictly equal, rightValue has to be the same string.
        ValueFromBlock fastTrue = m_out.anchor(m_out.booleanTrue);
        m_out.branch(m_out.equal(leftString, rightValue), unsure(continuation), unsure(testUntypedEdgeIsCell));

        // A non-cell right operand cannot equal a string.
        LBasicBlock lastNext = m_out.appendTo(testUntypedEdgeIsCell, testUntypedEdgeIsString);
        ValueFromBlock fastFalse = m_out.anchor(m_out.booleanFalse);
        m_out.branch(isNotCell(rightValue, rightValueType), unsure(continuation), unsure(testUntypedEdgeIsString));

        // Check if the untyped edge is a string.
        m_out.appendTo(testUntypedEdgeIsString, testStringEquality);
        m_out.branch(isNotString(rightValue, rightValueType), unsure(continuation), unsure(testStringEquality));

        // Full String compare.
        m_out.appendTo(testStringEquality, continuation);
        ValueFromBlock slowResult = m_out.anchor(stringsEqual(leftString, rightValue, stringEdge, untypedEdge));
        m_out.jump(continuation);

        // Continuation.
        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, fastTrue, fastFalse, slowResult));
    }
9673
9674 void compileCompareEqPtr()
9675 {
9676 setBoolean(
9677 m_out.equal(
9678 lowJSValue(m_node->child1()),
9679 weakPointer(m_node->cellOperand()->cell())));
9680 }
9681
9682 void compileCompareLess()
9683 {
9684 compare(
9685 [&] (LValue left, LValue right) {
9686 return m_out.lessThan(left, right);
9687 },
9688 [&] (LValue left, LValue right) {
9689 return m_out.doubleLessThan(left, right);
9690 },
9691 operationCompareStringImplLess,
9692 operationCompareStringLess,
9693 operationCompareLess);
9694 }
9695
9696 void compileCompareLessEq()
9697 {
9698 compare(
9699 [&] (LValue left, LValue right) {
9700 return m_out.lessThanOrEqual(left, right);
9701 },
9702 [&] (LValue left, LValue right) {
9703 return m_out.doubleLessThanOrEqual(left, right);
9704 },
9705 operationCompareStringImplLessEq,
9706 operationCompareStringLessEq,
9707 operationCompareLessEq);
9708 }
9709
9710 void compileCompareGreater()
9711 {
9712 compare(
9713 [&] (LValue left, LValue right) {
9714 return m_out.greaterThan(left, right);
9715 },
9716 [&] (LValue left, LValue right) {
9717 return m_out.doubleGreaterThan(left, right);
9718 },
9719 operationCompareStringImplGreater,
9720 operationCompareStringGreater,
9721 operationCompareGreater);
9722 }
9723
9724 void compileCompareGreaterEq()
9725 {
9726 compare(
9727 [&] (LValue left, LValue right) {
9728 return m_out.greaterThanOrEqual(left, right);
9729 },
9730 [&] (LValue left, LValue right) {
9731 return m_out.doubleGreaterThanOrEqual(left, right);
9732 },
9733 operationCompareStringImplGreaterEq,
9734 operationCompareStringGreaterEq,
9735 operationCompareGreaterEq);
9736 }
9737
9738 void compileCompareBelow()
9739 {
9740 setBoolean(m_out.below(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
9741 }
9742
9743 void compileCompareBelowEq()
9744 {
9745 setBoolean(m_out.belowOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
9746 }
9747
    void compileSameValue()
    {
        // SameValue semantics. For DoubleRepUse this differs from doubleEqual in
        // two ways handled below: NaN is SameValue to NaN, and +0/-0 are
        // distinguished (the bit-pattern compare in the patchpoint takes care of
        // that). All other use kinds go through operationSameValue.
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        if (m_node->isBinaryUseKind(DoubleRepUse)) {
            LValue arg1 = lowDouble(m_node->child1());
            LValue arg2 = lowDouble(m_node->child2());

            LBasicBlock numberCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Patchpoint compares the raw 64-bit bit patterns of the two
            // doubles; identical bits means SameValue is true (covers -0 vs +0
            // correctly, since they have different bit patterns).
            PatchpointValue* patchpoint = m_out.patchpoint(Int32);
            patchpoint->append(arg1, ValueRep::SomeRegister);
            patchpoint->append(arg2, ValueRep::SomeRegister);
            patchpoint->numGPScratchRegisters = 1;
            patchpoint->setGenerator(
                [] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                    GPRReg scratchGPR = params.gpScratch(0);
                    jit.moveDoubleTo64(params[1].fpr(), scratchGPR);
                    jit.moveDoubleTo64(params[2].fpr(), params[0].gpr());
                    jit.compare64(CCallHelpers::Equal, scratchGPR, params[0].gpr(), params[0].gpr());
                });
            patchpoint->effects = Effects::none();
            ValueFromBlock compareResult = m_out.anchor(patchpoint);
            m_out.branch(patchpoint, unsure(continuation), unsure(numberCase));

            // Bit patterns differ: the values are SameValue only if both are NaN
            // (x != x detects NaN).
            LBasicBlock lastNext = m_out.appendTo(numberCase, continuation);
            LValue isArg1NaN = m_out.doubleNotEqualOrUnordered(arg1, arg1);
            LValue isArg2NaN = m_out.doubleNotEqualOrUnordered(arg2, arg2);
            ValueFromBlock nanResult = m_out.anchor(m_out.bitAnd(isArg1NaN, isArg2NaN));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, compareResult, nanResult));
            return;
        }

        ASSERT(m_node->isBinaryUseKind(UntypedUse));
        setBoolean(vmCall(Int32, operationSameValue, weakPointer(globalObject), lowJSValue(m_node->child1()), lowJSValue(m_node->child2())));
    }
9787
9788 void compileToBoolean()
9789 {
9790 setBoolean(boolify(m_node->child1()));
9791 }
9792
9793 void compileLogicalNot()
9794 {
9795 setBoolean(m_out.logicalNot(boolify(m_node->child1())));
9796 }
9797
    // Lowers a polymorphic Call/Construct: arguments are stored to the outgoing
    // stack frame area, then a patchpoint emits the CallLinkInfo fast path with
    // an inline slow path for unlinked/failed calls.
    void compileCallOrConstruct()
    {
        Node* node = m_node;
        // varArgChild(0) is the callee; the rest are the arguments (incl. |this|).
        unsigned numArgs = node->numChildren() - 1;

        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));

        unsigned frameSize = (CallFrame::headerSizeInRegisters + numArgs) * sizeof(EncodedJSValue);
        unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);

        // JS->JS calling convention requires that the caller allows this much space on top of stack to
        // get trashed by the callee, even if not all of that space is used to pass arguments. We tell
        // B3 this explicitly for two reasons:
        //
        // - We will only pass frameSize worth of stuff.
        // - The trashed stack guarantee is logically separate from the act of passing arguments, so we
        //   shouldn't rely on Air to infer the trashed stack property based on the arguments it ends
        //   up seeing.
        m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);

        // Collect the arguments, since this can generate code and we want to generate it before we emit
        // the call.
        Vector<ConstrainedValue> arguments;

        // Make sure that the callee goes into GPR0 because that's where the slow path thunks expect the
        // callee to be.
        arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));

        // Pins a value to its slot in the outgoing call frame, expressed as an
        // offset from SP (hence the CallerFrameAndPC adjustment).
        auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
            intptr_t offsetFromSP =
                (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
            arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
        };

        addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
        addArgument(m_out.constInt32(numArgs), VirtualRegister(CallFrameSlot::argumentCountIncludingThis), PayloadOffset);
        for (unsigned i = 0; i < numArgs; ++i)
            addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgumentIncludingThis(i), 0);

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendVector(arguments);

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        // The call paths rely on the tag registers holding their canonical values.
        patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
        patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
        patchpoint->resultConstraints = { ValueRep::reg(GPRInfo::returnValueGPR) };

        // Capture by value: the generator runs later, after this pass's locals die.
        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM* vm = &this->vm();
        CodeOrigin nodeSemanticOrigin = node->origin.semantic;
        auto nodeOp = node->op();
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.codeOrigins->addUniqueCallSiteIndex(codeOrigin);

                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // Record the call site index so stack walking can attribute this
                // frame position correctly.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCountIncludingThis)));

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo(nodeSemanticOrigin);
                callLinkInfo->setUpCall(
                    nodeOp == Construct ? CallLinkInfo::Construct : CallLinkInfo::Call, GPRInfo::regT0);

                auto slowPath = callLinkInfo->emitFastPath(jit, GPRInfo::regT0, InvalidGPRReg, CallLinkInfo::UseDataIC::No);
                CCallHelpers::Jump done = jit.jump();

                slowPath.link(&jit);
                auto slowPathStart = jit.label();
                // Slow path expects the global object in regT3 — presumably a
                // thunk calling convention; confirm against the slow-path thunk.
                jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()->globalObjectFor(nodeSemanticOrigin)), GPRInfo::regT3);
                callLinkInfo->emitSlowPath(*vm, jit);

                done.link(&jit);

                auto doneLocation = jit.label();

                // Restore SP: the callee may have left it pointing at the callee
                // frame area.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-params.proc().frameSize()),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        callLinkInfo->setCodeLocations(
                            linkBuffer.locationOf<JSInternalPtrTag>(slowPathStart),
                            linkBuffer.locationOf<JSInternalPtrTag>(doneLocation));
                    });
            });

        setJSValue(patchpoint);
    }
9895
    // Lowers DirectCall/DirectConstruct/DirectTailCall: the callee executable is
    // known at compile time, so we can pad arguments up to the callee's declared
    // parameter count and link directly, with a slow path that only does linking
    // (operationLinkDirectCall) rather than full call resolution.
    void compileDirectCallOrConstruct()
    {
        Node* node = m_node;
        bool isTail = node->op() == DirectTailCall;
        bool isConstruct = node->op() == DirectConstruct;

        ExecutableBase* executable = node->castOperand<ExecutableBase*>();
        FunctionExecutable* functionExecutable = jsDynamicCast<FunctionExecutable*>(vm(), executable);

        unsigned numPassedArgs = node->numChildren() - 1;
        unsigned numAllocatedArgs = numPassedArgs;

        // Allocate frame slots up to the callee's parameter count (plus |this|),
        // capped by maximumDirectCallStackSize, so the callee need not fix up
        // missing arguments itself.
        if (functionExecutable) {
            numAllocatedArgs = std::max(
                numAllocatedArgs,
                std::min(
                    static_cast<unsigned>(functionExecutable->parameterCount()) + 1,
                    Options::maximumDirectCallStackSize()));
        }

        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));

        if (!isTail) {
            unsigned frameSize = (CallFrame::headerSizeInRegisters + numAllocatedArgs) * sizeof(EncodedJSValue);
            unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);

            m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);
        }

        Vector<ConstrainedValue> arguments;

        arguments.append(ConstrainedValue(jsCallee, ValueRep::SomeRegister));
        if (!isTail) {
            // Non-tail: pin each argument to its outgoing frame slot now.
            auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
                intptr_t offsetFromSP =
                    (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
                arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
            };

            addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
            addArgument(m_out.constInt32(numPassedArgs), VirtualRegister(CallFrameSlot::argumentCountIncludingThis), PayloadOffset);
            for (unsigned i = 0; i < numPassedArgs; ++i)
                addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgumentIncludingThis(i), 0);
            // Pad the remaining allocated slots with undefined.
            for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
                addArgument(m_out.constInt64(JSValue::encode(jsUndefined())), virtualRegisterForArgumentIncludingThis(i), 0);
        } else {
            // Tail: let B3 place arguments anywhere; the CallFrameShuffler in the
            // generator moves them into the reused caller frame.
            for (unsigned i = 0; i < numPassedArgs; ++i)
                arguments.append(ConstrainedValue(lowJSValue(m_graph.varArgChild(node, 1 + i)), ValueRep::WarmAny));
        }

        PatchpointValue* patchpoint = m_out.patchpoint(isTail ? Void : Int64);
        patchpoint->appendVector(arguments);

        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);

        if (isTail) {
            // The shuffler needs tags.
            patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
            patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));
        }

        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        if (!isTail) {
            patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
            patchpoint->resultConstraints = { ValueRep::reg(GPRInfo::returnValueGPR) };
        }

        // Capture by value: the generator runs after this pass's locals are gone.
        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        CodeOrigin semanticNodeOrigin = node->origin.semantic;
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.codeOrigins->addUniqueCallSiteIndex(codeOrigin);

                // params[0] is the result rep for non-tail calls, so the callee
                // is at index 1 there and index 0 for tail calls.
                GPRReg calleeGPR = params[!isTail].gpr();

                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                if (isTail) {
                    // Build the shuffle plan: where each outgoing value currently
                    // lives, plus which registers the slow path must preserve.
                    CallFrameShuffleData shuffleData;
                    shuffleData.numLocals = state->jitCode->common.frameRegisterCount;

                    RegisterSet toSave = params.unavailableRegisters();
                    shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatCell);
                    toSave.set(calleeGPR);
                    for (unsigned i = 0; i < numPassedArgs; ++i) {
                        ValueRecovery recovery = params[1 + i].recoveryForJSValue();
                        shuffleData.args.append(recovery);
                        recovery.forEachReg(
                            [&] (Reg reg) {
                                toSave.set(reg);
                            });
                    }
                    for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
                        shuffleData.args.append(ValueRecovery::constant(jsUndefined()));
                    shuffleData.numPassedArgs = numPassedArgs;
                    shuffleData.setupCalleeSaveRegisters(jit.codeBlock());

                    CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo(semanticNodeOrigin);
                    callLinkInfo->setUpCall(CallLinkInfo::DirectTailCall, InvalidGPRReg);

                    CCallHelpers::Label mainPath = jit.label();
                    jit.store32(
                        CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                        CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCountIncludingThis)));
                    callLinkInfo->emitDirectTailCallFastPath(jit, [&] {
                        callLinkInfo->setFrameShuffleData(shuffleData);
                        CallFrameShuffler(jit, shuffleData).prepareForTailCall();
                    });

                    // Control must never reach here: the tail call replaces this frame.
                    jit.abortWithReason(JITDidReturnFromTailCall);

                    CCallHelpers::Label slowPath = jit.label();
                    callOperation(
                        *state, toSave, jit,
                        semanticNodeOrigin, exceptions.get(), operationLinkDirectCall,
                        InvalidGPRReg, CCallHelpers::TrustedImmPtr(callLinkInfo), calleeGPR).call();
                    // After linking, retry the now-linked fast path.
                    jit.jump().linkTo(mainPath, &jit);
                    callLinkInfo->setExecutableDuringCompilation(executable);
                    if (numAllocatedArgs > numPassedArgs)
                        callLinkInfo->setMaxArgumentCountIncludingThis(numAllocatedArgs);

                    jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                        callLinkInfo->setCodeLocations(
                            linkBuffer.locationOf<JSInternalPtrTag>(slowPath),
                            CodeLocationLabel<JSInternalPtrTag>());
                    });
                    return;
                }

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo(semanticNodeOrigin);
                callLinkInfo->setUpCall(
                    isConstruct ? CallLinkInfo::DirectConstruct : CallLinkInfo::DirectCall, InvalidGPRReg);

                CCallHelpers::Label mainPath = jit.label();
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCountIncludingThis)));
                callLinkInfo->emitDirectFastPath(jit);
                // Restore SP after the call returns.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-params.proc().frameSize()),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

                callLinkInfo->setExecutableDuringCompilation(executable);
                if (numAllocatedArgs > numPassedArgs)
                    callLinkInfo->setMaxArgumentCountIncludingThis(numAllocatedArgs);

                // The linking slow path is emitted out-of-line, after the main
                // code, so the fast path stays compact.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        CCallHelpers::Label slowPath = jit.label();
                        // NOTE(review): x86-specific pop — presumably discarding a
                        // return address pushed by the fast path's call; confirm
                        // against emitDirectFastPath.
                        if (isX86())
                            jit.pop(CCallHelpers::selectScratchGPR(calleeGPR));

                        callOperation(
                            *state, params.unavailableRegisters(), jit,
                            semanticNodeOrigin, exceptions.get(), operationLinkDirectCall,
                            InvalidGPRReg, CCallHelpers::TrustedImmPtr(callLinkInfo),
                            calleeGPR).call();
                        jit.jump().linkTo(mainPath, &jit);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                callLinkInfo->setCodeLocations(
                                    linkBuffer.locationOf<JSInternalPtrTag>(slowPath),
                                    CodeLocationLabel<JSInternalPtrTag>());
                            });
                    });
            });

        if (isTail)
            patchpoint->effects.terminal = true;
        else
            setJSValue(patchpoint);
    }
10076
    // Compiles a TailCall node. The arguments are shuffled into our own (caller's)
    // frame, destroying it, and control jumps to the callee; hence the patchpoint is
    // Void and terminal, and returning from the callee returns past this function.
    void compileTailCall()
    {
        Node* node = m_node;
        // varArgChild(node, 0) is the callee; children 1..numChildren-1 are the arguments.
        unsigned numArgs = node->numChildren() - 1;

        // It seems counterintuitive that this is needed given that tail calls don't create a new frame
        // on the stack. However, the tail call slow path builds the frame at SP instead of FP before
        // calling into the slow path C code. This slow path may decide to throw an exception because
        // the callee we're trying to call is not callable. Throwing an exception will cause us to walk
        // the stack, which may read, for the sake of the correctness of this code, arbitrary slots on the
        // stack to recover state. This call arg area ensures the call frame shuffler does not overwrite
        // any of the slots the stack walking code requires when on the slow path.
        m_proc.requestCallArgAreaSizeInBytes(
            WTF::roundUpToMultipleOf(stackAlignmentBytes(), (CallFrame::headerSizeInRegisters + numArgs) * sizeof(EncodedJSValue)));

        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));

        // We want B3 to give us all of the arguments using whatever mechanism it thinks is
        // convenient. The generator then shuffles those arguments into our own call frame,
        // destroying our frame in the process.

        // Note that we don't have to do anything special for exceptions. A tail call is only a
        // tail call if it is not inside a try block.

        Vector<ConstrainedValue> arguments;

        // The callee is pinned to regT0; the shuffler below and the CallLinkInfo fast
        // path both expect it there.
        arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));

        for (unsigned i = 0; i < numArgs; ++i) {
            // Note: we could let the shuffler do boxing for us, but it's not super clear that this
            // would be better. Also, if we wanted to do that, then we'd have to teach the shuffler
            // that 32-bit values could land at 4-byte alignment but not 8-byte alignment.

            ConstrainedValue constrainedValue(
                lowJSValue(m_graph.varArgChild(node, 1 + i)),
                ValueRep::WarmAny);
            arguments.append(constrainedValue);
        }

        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        patchpoint->appendVector(arguments);

        patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
        patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));

        // Prevent any of the arguments from using the scratch register.
        patchpoint->clobberEarly(RegisterSet::macroScratchRegisters());

        patchpoint->effects.terminal = true;

        // We don't have to tell the patchpoint that we will clobber registers, since we won't return
        // anyway.

        // Capture everything the generator needs by value: it runs late in Air, after
        // the DFG graph (and `this`) may no longer be safe to walk.
        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM* vm = &this->vm();
        CodeOrigin semanticNodeOrigin = node->origin.semantic;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.codeOrigins->addUniqueCallSiteIndex(codeOrigin);

                // Yes, this is really necessary. You could throw an exception in a host call on the
                // slow path. That'll route us to operationLookupExceptionHandler(), which unwinds starting
                // with the call site index of our frame. Bad things happen if it's not set.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCountIncludingThis)));

                // Describe to the shuffler where every argument currently lives
                // (params[1 + i] because params[0] is the callee, pinned to regT0 above).
                CallFrameShuffleData shuffleData;
                shuffleData.numLocals = state->jitCode->common.frameRegisterCount;
                shuffleData.callee = ValueRecovery::inGPR(GPRInfo::regT0, DataFormatJS);

                for (unsigned i = 0; i < numArgs; ++i)
                    shuffleData.args.append(params[1 + i].recoveryForJSValue());

                shuffleData.numPassedArgs = numArgs;

                shuffleData.setupCalleeSaveRegisters(jit.codeBlock());

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo(codeOrigin);
                callLinkInfo->setUpCall(CallLinkInfo::TailCall, GPRInfo::regT0);

                // Fast path: shuffle arguments into our frame and jump to the cached callee.
                auto slowPath = callLinkInfo->emitTailCallFastPath(jit, GPRInfo::regT0, InvalidGPRReg, CallLinkInfo::UseDataIC::No, [&] {
                    callLinkInfo->setFrameShuffleData(shuffleData);
                    CallFrameShuffler(jit, shuffleData).prepareForTailCall();
                });

                // Slow path: build the frame at SP (not FP) and go through the generic
                // call linking machinery.
                slowPath.link(&jit);
                auto slowPathStart = jit.label();
                CallFrameShuffler slowPathShuffler(jit, shuffleData);
                slowPathShuffler.setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0));
                slowPathShuffler.prepareForSlowPath();

                // The slow path thunk expects the global object in regT3.
                jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()->globalObjectFor(semanticNodeOrigin)), GPRInfo::regT3);
                callLinkInfo->emitSlowPath(*vm, jit);

                // A tail call must never return here; crash loudly if it does.
                auto doneLocation = jit.label();
                jit.abortWithReason(JITDidReturnFromTailCall);

                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        callLinkInfo->setCodeLocations(
                            linkBuffer.locationOf<JSInternalPtrTag>(slowPathStart),
                            linkBuffer.locationOf<JSInternalPtrTag>(doneLocation));
                    });
            });
    }
10185
    // Record for one statically-known (non-spread) argument of a
    // PhantomNewArrayWithSpread: the index into the patchpoint's stackmap
    // params where that argument's value rep can be found at generation time.
    struct CapturedForPhantomNewArrayWithSpreadCase {
        unsigned parameterOffset;
    };
    // Record for one constant element of a PhantomNewArrayBuffer: the encoded
    // JSValue to store and the precomputed frame offset (in bytes) at which to
    // store it.
    struct CapturedForPhantomNewArrayBufferCase {
        int64_t value;
        int32_t currentStoreOffset;
    };
    // Marker emitted after all elements of a PhantomNewArrayBuffer: tells the
    // generator to step the running argument index down by the array's length.
    struct CapturedForPhantomNewArrayBufferEnd {
        unsigned arrayLength;
    };
    // Record for a PhantomCreateRest: which inline call frame to copy arguments
    // from, how many leading arguments to skip, and the stackmap params index
    // holding the (dynamic) count of arguments to copy.
    struct CapturedForPhantomCreateRest {
        InlineCallFrame* inlineCallFrame;
        unsigned numberOfArgumentsToSkip;
        unsigned parameterOffset;
    };
    // Tagged union describing one code-emission step recorded while walking a
    // phantom spread tree at DFG time, to be replayed by the patchpoint
    // generator late in Air (when the DFG graph is gone). See
    // compileCallOrConstructVarargsSpread() for how these are produced and
    // consumed.
    struct VarargsSpreadArgumentToEmit {
        enum Type {
            PhantomNewArrayWithSpreadCase,
            PhantomNewArrayBufferCase,
            PhantomNewArrayBufferEnd,
            PhantomCreateRest
        } m_type;
        // Which member is active is determined solely by m_type.
        union {
            CapturedForPhantomNewArrayWithSpreadCase m_phantomNewArrayWithSpreadCase;
            CapturedForPhantomNewArrayBufferCase m_phantomNewArrayBufferCase;
            CapturedForPhantomNewArrayBufferEnd m_phantomNewArrayBufferEnd;
            CapturedForPhantomCreateRest m_phantomCreateRest;
        };

        // Single-unsigned constructor is shared by the two cases that only carry
        // one payload word: a spread-case parameter offset or a buffer-end length.
        VarargsSpreadArgumentToEmit(VarargsSpreadArgumentToEmit::Type t, unsigned arg)
            : m_type(t)
        {
            if (m_type == PhantomNewArrayWithSpreadCase)
                m_phantomNewArrayWithSpreadCase = { arg };
            else {
                ASSERT(t == PhantomNewArrayBufferEnd);
                m_phantomNewArrayBufferEnd = { arg };
            }
        }
        // Constant array-buffer element: encoded value plus its store offset.
        VarargsSpreadArgumentToEmit(VarargsSpreadArgumentToEmit::Type t, int64_t value, int32_t currentStoreOffset)
            : m_type(t)
            , m_phantomNewArrayBufferCase({ value, currentStoreOffset })
        {
            ASSERT(t == PhantomNewArrayBufferCase);
        }
        // Rest-argument copy: source inline frame, skip count, and params index
        // of the dynamic argument count.
        VarargsSpreadArgumentToEmit(VarargsSpreadArgumentToEmit::Type t, InlineCallFrame* inlineCallFrame, unsigned numberOfArgumentsToSkip, unsigned parameterOffset)
            : m_type(t)
            , m_phantomCreateRest({ inlineCallFrame, numberOfArgumentsToSkip, parameterOffset })
        {
            ASSERT(t == PhantomCreateRest);
        }
    };
10238
    // Compiles a varargs call/construct/tail-call whose arguments node is a sunk
    // (phantom) spread tree. Phase 1 (here, at DFG time): walk the tree rooted at
    // child3, lowering statically-known values and recording one
    // VarargsSpreadArgumentToEmit per emission step. Phase 2 (the patchpoint
    // generator, late in Air): replay those records to build the callee frame
    // right-to-left, then perform the call.
    void compileCallOrConstructVarargsSpread()
    {
        Node* node = m_node;
        Node* arguments = node->child3().node();

        LValue jsCallee = lowJSValue(m_node->child1());
        LValue thisArg = lowJSValue(m_node->child2());

        RELEASE_ASSERT(arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomSpread || arguments->op() == PhantomNewArrayBuffer);

        unsigned staticArgumentCount = 0;
        Vector<LValue, 2> spreadLengths;
        Vector<LValue, 8> patchpointArguments;
        // One dynamic length per distinct inline call frame feeding a
        // PhantomCreateRest; ensure() below keeps us from loading it twice.
        HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;
        // Because the patchpoint generator runs late in Air, the dfg graph will be long gone.
        // So we must load everything relevant right now, and make sure that they are captured by value by the lambda that acts as the generator
        // One particularly tricky point is that the generator would like to walk over the tree rooted at this node, exploring through PhantomNewArrayWithSpread and PhantomNewArrayBuffer, emitting code along the way.
        // Instead, we do that walk here, and record just enough information in the following vector to emit the right code at the end of Air.
        Vector<VarargsSpreadArgumentToEmit> argumentsToEmitFromRightToLeft;
        int storeOffset = CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register));
        // Patchpoint params layout: [0] result, [1] callee, [2] this,
        // [3] argumentCountIncludingThis, so tree-supplied values start at index 4.
        unsigned paramsOffset = 4;
        unsigned index = 0;
        auto pushAndCountArgumentsFromRightToLeft = recursableLambda([&](auto self, Node* target) -> void {
            switch (target->op()) {
            case PhantomSpread:
                self(target->child1().node());
                return;
            case PhantomNewArrayWithSpread: {
                // The bit vector marks which children are themselves spreads.
                BitVector* bitVector = target->bitVector();
                for (unsigned i = target->numChildren(); i--; ) {
                    if (bitVector->get(i))
                        self(m_graph.varArgChild(target, i).node());
                    else {
                        ++staticArgumentCount;
                        LValue argument = this->lowJSValue(m_graph.varArgChild(target, i));
                        patchpointArguments.append(argument);
                        argumentsToEmitFromRightToLeft.append({ VarargsSpreadArgumentToEmit::Type::PhantomNewArrayWithSpreadCase, paramsOffset + (index++)});
                    }
                }
                return;
            }
            case PhantomNewArrayBuffer: {
                auto* array = target->castOperand<JSImmutableButterfly*>();
                unsigned arrayLength = array->length();
                staticArgumentCount += arrayLength;
                Checked<int32_t> offsetCount { 1 };
                for (unsigned i = arrayLength; i--; ++offsetCount) {
                    // Checked<> arithmetic so a pathological offset crashes rather
                    // than silently wrapping.
                    Checked<int32_t> currentStoreOffset { storeOffset };
                    currentStoreOffset -= (offsetCount * static_cast<int32_t>(sizeof(Register)));
                    // Because varargs values are drained as JSValue, we should not generate value
                    // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
                    int64_t value = JSValue::encode(array->get(i));
                    argumentsToEmitFromRightToLeft.append({ VarargsSpreadArgumentToEmit::Type::PhantomNewArrayBufferCase, value, currentStoreOffset.value() });
                }
                argumentsToEmitFromRightToLeft.append({ VarargsSpreadArgumentToEmit::Type::PhantomNewArrayBufferEnd, arrayLength });
                return;
            }
            case PhantomCreateRest: {
                InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();
                unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();
                unsigned parameterOffset = paramsOffset + (index++);
                LValue length = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                    return m_out.zeroExtPtr(this->getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip));
                }).iterator->value;
                patchpointArguments.append(length);
                spreadLengths.append(length);
                argumentsToEmitFromRightToLeft.append({ VarargsSpreadArgumentToEmit::Type::PhantomCreateRest, inlineCallFrame, numberOfArgumentsToSkip, parameterOffset });
                return;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        });
        pushAndCountArgumentsFromRightToLeft(arguments);

        // Total count = statically-known args + the dynamic rest lengths + `this`.
        LValue argumentCountIncludingThis = m_out.constIntPtr(staticArgumentCount + 1);
        for (LValue length : spreadLengths)
            argumentCountIncludingThis = m_out.add(length, argumentCountIncludingThis);

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);

        patchpoint->append(jsCallee, ValueRep::reg(GPRInfo::regT0));
        patchpoint->append(thisArg, ValueRep::WarmAny);
        patchpoint->append(argumentCountIncludingThis, ValueRep::WarmAny);
        patchpoint->appendVectorWithRep(patchpointArguments, ValueRep::WarmAny);
        patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
        patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));

        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);

        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->clobber(RegisterSet::volatileRegistersForJSCall()); // No inputs will be in a volatile register.
        patchpoint->resultConstraints = { ValueRep::reg(GPRInfo::returnValueGPR) };

        patchpoint->numGPScratchRegisters = 0;

        // This is the minimum amount of call arg area stack space that all JS->JS calls always have.
        unsigned minimumJSCallAreaSize =
            sizeof(CallerFrameAndPC) +
            WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(EncodedJSValue));

        m_proc.requestCallArgAreaSizeInBytes(minimumJSCallAreaSize);

        // Capture everything the generator needs by value: the DFG graph and `this`
        // must not be touched from inside the generator.
        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM* vm = &this->vm();
        CodeOrigin semanticNodeOrigin = node->origin.semantic;
        auto nodeOp = node->op();
        patchpoint->setGenerator(
            [=, argumentsToEmit = WTFMove(argumentsToEmitFromRightToLeft)] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex =
                    state->jitCode->common.codeOrigins->addUniqueCallSiteIndex(codeOrigin);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // Unwinding from a slow-path throw reads this slot; it must be set.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCountIncludingThis)));

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo(semanticNodeOrigin);

                // Treat everything except the JS-call volatiles (plus the callee) as
                // off-limits when picking scratch registers.
                RegisterSet usedRegisters = RegisterSet::allRegisters();
                usedRegisters.exclude(RegisterSet::volatileRegistersForJSCall());
                GPRReg calleeGPR = params[1].gpr();
                usedRegisters.set(calleeGPR);

                ScratchRegisterAllocator allocator(usedRegisters);
                GPRReg scratchGPR1 = allocator.allocateScratchGPR();
                GPRReg scratchGPR2 = allocator.allocateScratchGPR();
                GPRReg scratchGPR3 = allocator.allocateScratchGPR();
                GPRReg scratchGPR4 = allocator.allocateScratchGPR();
                RELEASE_ASSERT(!allocator.numberOfReusedRegisters());

                // Materialize a stackmap value rep (constant, stack slot, or GPR)
                // into `result`.
                auto getValueFromRep = [&] (B3::ValueRep rep, GPRReg result) {
                    ASSERT(!usedRegisters.get(result));

                    if (rep.isConstant()) {
                        jit.move(CCallHelpers::Imm64(rep.value()), result);
                        return;
                    }

                    // Note: in this function, we only request 64 bit values.
                    if (rep.isStack()) {
                        jit.load64(
                            CCallHelpers::Address(GPRInfo::callFrameRegister, rep.offsetFromFP()),
                            result);
                        return;
                    }

                    RELEASE_ASSERT(rep.isGPR());
                    ASSERT(usedRegisters.get(rep.gpr()));
                    jit.move(rep.gpr(), result);
                };

                auto callWithExceptionCheck = [&] (void(*callee)(JSGlobalObject*)) {
                    jit.move(CCallHelpers::TrustedImmPtr(tagCFunction<OperationPtrTag>(callee)), GPRInfo::nonPreservedNonArgumentGPR0);
                    jit.call(GPRInfo::nonPreservedNonArgumentGPR0, OperationPtrTag);
                    exceptions->append(jit.emitExceptionCheck(*vm, AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
                };

                CCallHelpers::JumpList slowCase;
                unsigned originalStackHeight = params.proc().frameSize();

                {
                    unsigned numUsedSlots = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), originalStackHeight / sizeof(EncodedJSValue));
                    B3::ValueRep argumentCountIncludingThisRep = params[3];
                    getValueFromRep(argumentCountIncludingThisRep, scratchGPR2);
                    // Too many arguments: bail to the stack-overflow throw below.
                    slowCase.append(jit.branch32(CCallHelpers::Above, scratchGPR2, CCallHelpers::TrustedImm32(JSC::maxArguments + 1)));

                    jit.move(scratchGPR2, scratchGPR1);
                    jit.addPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(numUsedSlots + CallFrame::headerSizeInRegisters)), scratchGPR1);
                    // scratchGPR1 now has the required frame size in Register units
                    // Round scratchGPR1 to next multiple of stackAlignmentRegisters()
                    jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), scratchGPR1);
                    jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), scratchGPR1);
                    jit.negPtr(scratchGPR1);
                    jit.getEffectiveAddress(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight), scratchGPR1);

                    // Before touching stack values, we should update the stack pointer to protect them from signal stack.
                    jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), scratchGPR1, CCallHelpers::stackPointerRegister);

                    jit.store32(scratchGPR2, CCallHelpers::Address(scratchGPR1, CallFrameSlot::argumentCountIncludingThis * static_cast<int>(sizeof(Register)) + PayloadOffset));

                    // Replay the DFG-time walk. scratchGPR1 = new frame base,
                    // scratchGPR2 = running argument index (counts down from the
                    // total as arguments are stored right-to-left).
                    for (const auto& argumentToEmit : argumentsToEmit) {
                        switch (argumentToEmit.m_type) {
                        case VarargsSpreadArgumentToEmit::PhantomNewArrayWithSpreadCase: {
                            unsigned parameterOffset = argumentToEmit.m_phantomNewArrayWithSpreadCase.parameterOffset;
                            jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR2);
                            getValueFromRep(params[parameterOffset], scratchGPR3);
                            jit.store64(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, storeOffset));
                            continue;
                        }
                        case VarargsSpreadArgumentToEmit::PhantomNewArrayBufferCase: {
                            // Constant element: store the pre-encoded JSValue at
                            // its precomputed offset; the index is adjusted once at
                            // PhantomNewArrayBufferEnd instead of per element.
                            int64_t value = argumentToEmit.m_phantomNewArrayBufferCase.value;
                            int32_t currentStoreOffset = argumentToEmit.m_phantomNewArrayBufferCase.currentStoreOffset;
                            jit.move(CCallHelpers::TrustedImm64(value), scratchGPR3);
                            jit.store64(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, currentStoreOffset));
                            continue;
                        }
                        case VarargsSpreadArgumentToEmit::PhantomNewArrayBufferEnd: {
                            size_t arrayLength = static_cast<size_t>(argumentToEmit.m_phantomNewArrayBufferEnd.arrayLength);
                            jit.subPtr(CCallHelpers::TrustedImmPtr(arrayLength), scratchGPR2);
                            continue;
                        }
                        case VarargsSpreadArgumentToEmit::PhantomCreateRest: {
                            InlineCallFrame* inlineCallFrame = argumentToEmit.m_phantomCreateRest.inlineCallFrame;
                            unsigned numberOfArgumentsToSkip = argumentToEmit.m_phantomCreateRest.numberOfArgumentsToSkip;
                            unsigned parameterOffset = argumentToEmit.m_phantomCreateRest.parameterOffset;

                            B3::ValueRep numArgumentsToCopy = params[parameterOffset];
                            getValueFromRep(numArgumentsToCopy, scratchGPR3);
                            int loadOffset = (AssemblyHelpers::argumentsStart(inlineCallFrame).offset() + numberOfArgumentsToSkip) * static_cast<int>(sizeof(Register));

                            // Copy loop: move scratchGPR3 arguments from the inline
                            // frame into the new frame, last to first.
                            auto done = jit.branchTestPtr(MacroAssembler::Zero, scratchGPR3);
                            auto loopStart = jit.label();
                            jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR3);
                            jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR2);
                            jit.load64(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR3, CCallHelpers::TimesEight, loadOffset), scratchGPR4);
                            jit.store64(scratchGPR4,
                                CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, storeOffset));
                            jit.branchTestPtr(CCallHelpers::NonZero, scratchGPR3).linkTo(loopStart, &jit);
                            done.link(&jit);
                        }
                        }
                    }
                }

                {
                    // Stack overflow path: throw and never come back.
                    CCallHelpers::Jump dontThrow = jit.jump();
                    slowCase.link(&jit);
                    jit.setupArguments<decltype(operationThrowStackOverflowForVarargs)>(jit.codeBlock()->globalObjectFor(semanticNodeOrigin));
                    jit.prepareCallOperation(jit.vm());
                    callWithExceptionCheck(operationThrowStackOverflowForVarargs);
                    jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);

                    dontThrow.link(&jit);
                }

                ASSERT(calleeGPR == GPRInfo::regT0);
                jit.store64(calleeGPR, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));
                // params[2] is thisArg; it goes in argument slot 0.
                getValueFromRep(params[2], scratchGPR3);
                jit.store64(scratchGPR3, CCallHelpers::calleeArgumentSlot(0));

                CallLinkInfo::CallType callType;
                if (nodeOp == ConstructVarargs || nodeOp == ConstructForwardVarargs)
                    callType = CallLinkInfo::ConstructVarargs;
                else if (nodeOp == TailCallVarargs || nodeOp == TailCallForwardVarargs)
                    callType = CallLinkInfo::TailCallVarargs;
                else
                    callType = CallLinkInfo::CallVarargs;

                callLinkInfo->setUpCall(callType, GPRInfo::regT0);

                bool isTailCall = CallLinkInfo::callModeFor(callType) == CallMode::Tail;

                ASSERT(!usedRegisters.get(GPRInfo::regT2)); // Used on the slow path.

                CCallHelpers::JumpList slowPath;
                CCallHelpers::Jump done;
                if (isTailCall) {
                    slowPath = callLinkInfo->emitTailCallFastPath(jit, GPRInfo::regT0, InvalidGPRReg, CallLinkInfo::UseDataIC::No, [&] {
                        jit.emitRestoreCalleeSaves();
                        jit.prepareForTailCallSlow();
                    });
                } else {
                    slowPath = callLinkInfo->emitFastPath(jit, GPRInfo::regT0, InvalidGPRReg, CallLinkInfo::UseDataIC::No);
                    done = jit.jump();
                }

                slowPath.link(&jit);
                auto slowPathStart = jit.label();

                if (isTailCall)
                    jit.emitRestoreCalleeSaves();
                // The slow path thunk expects the global object in regT3.
                jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()->globalObjectFor(semanticNodeOrigin)), GPRInfo::regT3);
                callLinkInfo->emitSlowPath(*vm, jit);

                if (isTailCall)
                    jit.abortWithReason(JITDidReturnFromTailCall);
                else
                    done.link(&jit);

                auto doneLocation = jit.label();

                // Restore our stack pointer after the call.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-originalStackHeight),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        callLinkInfo->setCodeLocations(
                            linkBuffer.locationOf<JSInternalPtrTag>(slowPathStart),
                            linkBuffer.locationOf<JSInternalPtrTag>(doneLocation));
                    });
            });

        switch (node->op()) {
        case TailCallForwardVarargs:
            // Tail calls never return; the patchpoint's Int64 result is unused.
            m_out.unreachable();
            break;

        default:
            setJSValue(patchpoint);
            break;
        }
    }
10549
    // Compiles the varargs family of call nodes. Two shapes:
    //  - non-forwarding (CallVarargs etc.): the arguments come from a real JS
    //    arguments object, so the generator calls out to C++ to size and set up
    //    the varargs frame;
    //  - forwarding (CallForwardVarargs etc.): the caller's own arguments are
    //    forwarded in place via emitSetupVarargsFrameFastCase.
    // Forwarding of a sunk spread tree is delegated to
    // compileCallOrConstructVarargsSpread().
    void compileCallOrConstructVarargs()
    {
        Node* node = m_node;
        LValue jsCallee = lowJSValue(m_node->child1());
        LValue thisArg = lowJSValue(m_node->child2());

        LValue jsArguments = nullptr;
        bool forwarding = false;

        switch (node->op()) {
        case CallVarargs:
        case TailCallVarargs:
        case TailCallVarargsInlinedCaller:
        case ConstructVarargs:
            jsArguments = lowJSValue(node->child3());
            break;
        case CallForwardVarargs:
        case TailCallForwardVarargs:
        case TailCallForwardVarargsInlinedCaller:
        case ConstructForwardVarargs:
            forwarding = true;
            break;
        default:
            DFG_CRASH(m_graph, node, "bad node type");
            break;
        }

        // Forwarding a phantom spread tree is handled by the dedicated spread path.
        if (forwarding && m_node->child3()) {
            Node* arguments = m_node->child3().node();
            if (arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread) {
                compileCallOrConstructVarargsSpread();
                return;
            }
        }


        PatchpointValue* patchpoint = m_out.patchpoint(Int64);

        // Append the forms of the arguments that we will use before any clobbering happens.
        patchpoint->append(jsCallee, ValueRep::reg(GPRInfo::regT0));
        if (jsArguments)
            patchpoint->appendSomeRegister(jsArguments);
        patchpoint->appendSomeRegister(thisArg);

        if (!forwarding) {
            // Now append them again for after clobbering. Note that the compiler may ask us to use a
            // different register for the late for the post-clobbering version of the value. This gives
            // the compiler a chance to spill these values without having to burn any callee-saves.
            patchpoint->append(jsCallee, ValueRep::LateColdAny);
            patchpoint->append(jsArguments, ValueRep::LateColdAny);
            patchpoint->append(thisArg, ValueRep::LateColdAny);
        }

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
        patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));

        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
        patchpoint->resultConstraints = { ValueRep::reg(GPRInfo::returnValueGPR) };

        // This is the minimum amount of call arg area stack space that all JS->JS calls always have.
        unsigned minimumJSCallAreaSize =
            sizeof(CallerFrameAndPC) +
            WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(EncodedJSValue));

        m_proc.requestCallArgAreaSizeInBytes(minimumJSCallAreaSize);

        // Capture everything the generator needs by value; the DFG graph is gone
        // by the time the generator runs late in Air.
        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM* vm = &this->vm();
        CodeOrigin semanticNodeOrigin = node->origin.semantic;
        // For forwarding, this identifies whose arguments we forward: child3's
        // frame if present, otherwise this node's own semantic frame.
        InlineCallFrame* inlineCallFrame;
        if (node->child3())
            inlineCallFrame = node->child3()->origin.semantic.inlineCallFrame();
        else
            inlineCallFrame = semanticNodeOrigin.inlineCallFrame();
        CallVarargsData* data = node->callVarargsData();
        auto nodeOp = node->op();
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex =
                    state->jitCode->common.codeOrigins->addUniqueCallSiteIndex(codeOrigin);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // Unwinding from a slow-path throw reads this slot; it must be set.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCountIncludingThis)));

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo(semanticNodeOrigin);

                // params[0] is the result; the early uses start at index 1 and
                // mirror the append order above.
                unsigned argIndex = 1;
                GPRReg calleeGPR = params[argIndex++].gpr();
                ASSERT(calleeGPR == GPRInfo::regT0);
                GPRReg argumentsGPR = jsArguments ? params[argIndex++].gpr() : InvalidGPRReg;
                GPRReg thisGPR = params[argIndex++].gpr();

                B3::ValueRep calleeLateRep;
                B3::ValueRep argumentsLateRep;
                B3::ValueRep thisLateRep;
                if (!forwarding) {
                    // If we're not forwarding then we'll need callee, arguments, and this after we
                    // have potentially clobbered calleeGPR, argumentsGPR, and thisGPR. Our technique
                    // for this is to supply all of those operands as late uses in addition to
                    // specifying them as early uses. It's possible that the late use uses a spill
                    // while the early use uses a register, and it's possible for the late and early
                    // uses to use different registers. We do know that the late uses interfere with
                    // all volatile registers and so won't use those, but the early uses may use
                    // volatile registers and in the case of calleeGPR, it's pinned to regT0 so it
                    // definitely will.
                    //
                    // Note that we have to be super careful with these. It's possible that these
                    // use a shuffling of the registers used for calleeGPR, argumentsGPR, and
                    // thisGPR. If that happens and we do for example:
                    //
                    //     calleeLateRep.emitRestore(jit, calleeGPR);
                    //     argumentsLateRep.emitRestore(jit, calleeGPR);
                    //
                    // Then we might end up with garbage if calleeLateRep.gpr() == argumentsGPR and
                    // argumentsLateRep.gpr() == calleeGPR.
                    //
                    // We do a variety of things to prevent this from happening. For example, we use
                    // argumentsLateRep before needing the other two and after we've already stopped
                    // using the *GPRs. Also, we pin calleeGPR to regT0, and rely on the fact that
                    // the *LateReps cannot use volatile registers (so they cannot be regT0, so
                    // calleeGPR != argumentsLateRep.gpr() and calleeGPR != thisLateRep.gpr()).
                    //
                    // An alternative would have been to just use early uses and early-clobber all
                    // volatile registers. But that would force callee, arguments, and this into
                    // callee-save registers even if we have to spill them. We don't want spilling to
                    // use up three callee-saves.
                    //
                    // TL;DR: The way we use LateReps here is dangerous and barely works but achieves
                    // some desirable performance properties, so don't mistake the cleverness for
                    // elegance.
                    calleeLateRep = params[argIndex++];
                    argumentsLateRep = params[argIndex++];
                    thisLateRep = params[argIndex++];
                }

                // Get some scratch registers.
                RegisterSet usedRegisters;
                usedRegisters.merge(RegisterSet::stackRegisters());
                usedRegisters.merge(RegisterSet::reservedHardwareRegisters());
                usedRegisters.merge(RegisterSet::calleeSaveRegisters());
                usedRegisters.set(calleeGPR);
                if (argumentsGPR != InvalidGPRReg)
                    usedRegisters.set(argumentsGPR);
                usedRegisters.set(thisGPR);
                if (calleeLateRep.isReg())
                    usedRegisters.set(calleeLateRep.reg());
                if (argumentsLateRep.isReg())
                    usedRegisters.set(argumentsLateRep.reg());
                if (thisLateRep.isReg())
                    usedRegisters.set(thisLateRep.reg());
                ScratchRegisterAllocator allocator(usedRegisters);
                GPRReg scratchGPR1 = allocator.allocateScratchGPR();
                GPRReg scratchGPR2 = allocator.allocateScratchGPR();
                // Only the forwarding fast case needs a third scratch.
                GPRReg scratchGPR3 = forwarding ? allocator.allocateScratchGPR() : InvalidGPRReg;
                RELEASE_ASSERT(!allocator.numberOfReusedRegisters());

                auto callWithExceptionCheck = [&] (void(*callee)()) {
                    jit.move(CCallHelpers::TrustedImmPtr(tagCFunction<OperationPtrTag>(callee)), GPRInfo::nonPreservedNonArgumentGPR0);
                    jit.call(GPRInfo::nonPreservedNonArgumentGPR0, OperationPtrTag);
                    exceptions->append(jit.emitExceptionCheck(*vm, AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
                };

                unsigned originalStackHeight = params.proc().frameSize();

                if (forwarding) {
                    jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR2);

                    CCallHelpers::JumpList slowCase;

                    // emitSetupVarargsFrameFastCase modifies the stack pointer if it succeeds.
                    emitSetupVarargsFrameFastCase(*vm, jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);

                    // Fast case failed => stack overflow; throw and never return.
                    CCallHelpers::Jump done = jit.jump();
                    slowCase.link(&jit);
                    jit.setupArguments<decltype(operationThrowStackOverflowForVarargs)>(jit.codeBlock()->globalObjectFor(semanticNodeOrigin));
                    jit.prepareCallOperation(jit.vm());
                    callWithExceptionCheck(bitwise_cast<void(*)()>(operationThrowStackOverflowForVarargs));
                    jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);

                    done.link(&jit);
                } else {
                    // Non-forwarding: size the frame, then populate it, both via C++
                    // operations. Each call may throw, hence the exception checks.
                    jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR1);
                    jit.setupArguments<decltype(operationSizeFrameForVarargs)>(jit.codeBlock()->globalObjectFor(semanticNodeOrigin), argumentsGPR, scratchGPR1, CCallHelpers::TrustedImm32(data->firstVarArgOffset));
                    jit.prepareCallOperation(jit.vm());
                    callWithExceptionCheck(bitwise_cast<void(*)()>(operationSizeFrameForVarargs));

                    jit.move(GPRInfo::returnValueGPR, scratchGPR1);
                    jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR2);
                    // The first operation may have clobbered argumentsGPR; restore
                    // from its late rep (safe here per the big comment above).
                    argumentsLateRep.emitRestore(jit, argumentsGPR);
                    emitSetVarargsFrame(jit, scratchGPR1, false, scratchGPR2, scratchGPR2);
                    jit.addPtr(CCallHelpers::TrustedImm32(-minimumJSCallAreaSize), scratchGPR2, CCallHelpers::stackPointerRegister);
                    jit.setupArguments<decltype(operationSetupVarargsFrame)>(jit.codeBlock()->globalObjectFor(semanticNodeOrigin), scratchGPR2, argumentsGPR, CCallHelpers::TrustedImm32(data->firstVarArgOffset), scratchGPR1);
                    jit.prepareCallOperation(jit.vm());
                    callWithExceptionCheck(bitwise_cast<void(*)()>(operationSetupVarargsFrame));

                    jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::returnValueGPR, CCallHelpers::stackPointerRegister);

                    calleeLateRep.emitRestore(jit, GPRInfo::regT0);

                    // This may not emit code if thisGPR got a callee-save. Also, we're guaranteed
                    // that thisGPR != GPRInfo::regT0 because regT0 interferes with it.
                    thisLateRep.emitRestore(jit, thisGPR);
                }

                jit.store64(GPRInfo::regT0, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));
                jit.store64(thisGPR, CCallHelpers::calleeArgumentSlot(0));

                CallLinkInfo::CallType callType;
                if (nodeOp == ConstructVarargs || nodeOp == ConstructForwardVarargs)
                    callType = CallLinkInfo::ConstructVarargs;
                else if (nodeOp == TailCallVarargs || nodeOp == TailCallForwardVarargs)
                    callType = CallLinkInfo::TailCallVarargs;
                else
                    callType = CallLinkInfo::CallVarargs;

                callLinkInfo->setUpCall(callType, GPRInfo::regT0);

                bool isTailCall = CallLinkInfo::callModeFor(callType) == CallMode::Tail;

                CCallHelpers::JumpList slowPath;
                CCallHelpers::Jump done;
                if (isTailCall) {
                    slowPath = callLinkInfo->emitTailCallFastPath(jit, GPRInfo::regT0, InvalidGPRReg, CallLinkInfo::UseDataIC::No, [&] {
                        jit.emitRestoreCalleeSaves();
                        jit.prepareForTailCallSlow();
                    });
                } else {
                    slowPath = callLinkInfo->emitFastPath(jit, GPRInfo::regT0, InvalidGPRReg, CallLinkInfo::UseDataIC::No);
                    done = jit.jump();
                }

                slowPath.link(&jit);
                auto slowPathStart = jit.label();

                if (isTailCall)
                    jit.emitRestoreCalleeSaves();
                // The slow path thunk expects the global object in regT3.
                jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()->globalObjectFor(semanticNodeOrigin)), GPRInfo::regT3);
                callLinkInfo->emitSlowPath(*vm, jit);

                if (isTailCall)
                    jit.abortWithReason(JITDidReturnFromTailCall);
                else
                    done.link(&jit);

                auto doneLocation = jit.label();

                // Restore our stack pointer after the call.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-originalStackHeight),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        callLinkInfo->setCodeLocations(
                            linkBuffer.locationOf<JSInternalPtrTag>(slowPathStart),
                            linkBuffer.locationOf<JSInternalPtrTag>(doneLocation));
                    });
            });

        switch (node->op()) {
        case TailCallVarargs:
        case TailCallForwardVarargs:
            // Tail calls never return; the patchpoint's Int64 result is unused.
            m_out.unreachable();
            break;

        default:
            setJSValue(patchpoint);
            break;
        }
    }
10831
    void compileCallEval()
    {
        Node* node = m_node;
        // Child 0 is the callee; the remaining children are |this| plus the arguments.
        unsigned numArgs = node->numChildren() - 1;

        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));

        // Reserve stack space for the callee frame we build in place: the frame
        // header plus one slot per argument, rounded up to stack alignment.
        unsigned frameSize = (CallFrame::headerSizeInRegisters + numArgs) * sizeof(EncodedJSValue);
        unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);

        m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);

        Vector<ConstrainedValue> arguments;
        // The callee must live in regT0; the slow-path/virtual-call code below relies on it.
        arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));

        // Pins a value into a slot of the callee frame being assembled below the
        // current stack pointer. The slot index is relative to the new frame, so
        // subtract the CallerFrameAndPC header that sits between SP and the frame.
        auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
            intptr_t offsetFromSP =
                (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
            arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
        };

        addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
        addArgument(m_out.constInt32(numArgs), VirtualRegister(CallFrameSlot::argumentCountIncludingThis), PayloadOffset);
        for (unsigned i = 0; i < numArgs; ++i)
            addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgumentIncludingThis(i), 0);

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendVector(arguments);

        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);

        // The JS calling convention needs the tag registers materialized.
        patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
        patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
        patchpoint->resultConstraints = { ValueRep::reg(GPRInfo::returnValueGPR) };

        // Capture by value everything the generator lambda needs: it runs later,
        // at machine-code generation time, after this stack frame is gone.
        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM& vm = this->vm();
        CodeOrigin semanticNodeOrigin = node->origin.semantic;
        auto ecmaMode = node->ecmaMode().value();
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        patchpoint->setGenerator(
            [=, &vm] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.codeOrigins->addUniqueCallSiteIndex(codeOrigin);

                Box<CCallHelpers::JumpList> exceptions = exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // Record the call site index in the caller frame so unwinding can
                // identify this call.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCountIncludingThis)));

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo(semanticNodeOrigin);
                callLinkInfo->setUpCall(CallLinkInfo::Call, GPRInfo::regT0);

                // regT1 = pointer to the callee frame we built (just below the
                // CallerFrameAndPC header); link its caller-frame slot to us.
                jit.addPtr(CCallHelpers::TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), CCallHelpers::stackPointerRegister, GPRInfo::regT1);
                jit.storePtr(GPRInfo::callFrameRegister, CCallHelpers::Address(GPRInfo::regT1, CallFrame::callerFrameOffset()));

                // Now we need to make room for:
                // - The caller frame and PC for a call to operationCallEval.
                // - Potentially two arguments on the stack.
                unsigned requiredBytes = sizeof(CallerFrameAndPC) + sizeof(CallFrame*) * 2;
                requiredBytes = WTF::roundUpToMultipleOf(stackAlignmentBytes(), requiredBytes);
                jit.subPtr(CCallHelpers::TrustedImm32(requiredBytes), CCallHelpers::stackPointerRegister);
                jit.move(CCallHelpers::TrustedImm32(ecmaMode), GPRInfo::regT2);
                jit.setupArguments<decltype(operationCallEval)>(globalObject, GPRInfo::regT1, GPRInfo::regT2);
                jit.prepareCallOperation(vm);
                jit.move(CCallHelpers::TrustedImmPtr(tagCFunction<OperationPtrTag>(operationCallEval)), GPRInfo::nonPreservedNonArgumentGPR0);
                jit.call(GPRInfo::nonPreservedNonArgumentGPR0, OperationPtrTag);
                exceptions->append(jit.emitExceptionCheck(state->vm(), AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));

                // A non-empty (non-zero) result means operationCallEval handled the
                // call as a real eval; otherwise fall back to a normal virtual call.
                CCallHelpers::Jump done = jit.branchTest64(CCallHelpers::NonZero, GPRInfo::returnValueGPR);

                jit.addPtr(CCallHelpers::TrustedImm32(requiredBytes), CCallHelpers::stackPointerRegister);
                jit.load64(CCallHelpers::calleeFrameSlot(CallFrameSlot::callee), GPRInfo::regT0);
                jit.emitVirtualCall(vm, globalObject, callLinkInfo);

                done.link(&jit);
                // Restore SP to this frame's canonical stack height.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-params.proc().frameSize()),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
            });

        setJSValue(patchpoint);
    }
10921
10922 void compileVarargsLength()
10923 {
10924 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
10925 LoadVarargsData* data = m_node->loadVarargsData();
10926 LValue jsArguments = lowJSValue(m_node->argumentsChild());
10927
10928 LValue length = m_out.castToInt32(vmCall(Int64, operationSizeOfVarargs, weakPointer(globalObject), jsArguments, m_out.constInt32(data->offset)));
10929
10930 LValue lengthIncludingThis = m_out.add(length, m_out.int32One);
10931
10932 setInt32(lengthIncludingThis);
10933 }
10934
    void compileLoadVarargs()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        LoadVarargsData* data = m_node->loadVarargsData();
        LValue jsArguments = lowJSValue(m_node->argumentsChild());
        // The argument count (including |this|) was computed earlier (child1).
        LValue lengthIncludingThis = lowInt32(m_node->child1());

        // OSR-exit if the count is zero or exceeds the space reserved for these
        // slots (data->limit).
        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.bitOr(m_out.isZero32(lengthIncludingThis), m_out.above(lengthIncludingThis, m_out.constInt32(data->limit))));

        m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));

        // FIXME: This computation is rather silly. If operationLoadVarargs just took a pointer instead
        // of a VirtualRegister, we wouldn't have to do this.
        // https://bugs.webkit.org/show_bug.cgi?id=141660
        // Convert the slot's frame-relative byte offset into a register index
        // (>> 3 divides by the 8-byte slot size) for the VirtualRegister argument.
        LValue machineStart = m_out.lShr(
            m_out.sub(addressFor(data->machineStart).value(), m_callFrame),
            m_out.constIntPtr(3));

        // Let the runtime copy the varargs into the machine slots, padding with
        // undefined up to data->mandatoryMinimum.
        vmCall(
            Void, operationLoadVarargs, weakPointer(globalObject),
            m_out.castToInt32(machineStart), jsArguments, m_out.constInt32(data->offset),
            lengthIncludingThis, m_out.constInt32(data->mandatoryMinimum));
    }
10960
    // Forwards the caller's (possibly inlined) arguments directly into this
    // frame's varargs slots without materializing an arguments object.
    void compileForwardVarargs()
    {
        // Phantom array/spread arguments take a different, spread-aware path.
        if (m_node->argumentsChild()) {
            Node* arguments = m_node->argumentsChild().node();
            if (arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread) {
                compileForwardVarargsWithSpread();
                return;
            }
        }

        LoadVarargsData* data = m_node->loadVarargsData();
        // The source frame is the inline call frame of the (phantom) arguments,
        // or of this node when there is no arguments child.
        InlineCallFrame* inlineCallFrame;
        if (m_node->argumentsChild())
            inlineCallFrame = m_node->argumentsChild()->origin.semantic.inlineCallFrame();
        else
            inlineCallFrame = m_origin.semantic.inlineCallFrame();

        unsigned numberOfArgumentsToSkip = data->offset;
        LValue lengthIncludingThis = lowInt32(m_node->child1());

        LValue length = m_out.sub(lengthIncludingThis, m_out.int32One);
        // OSR-exit if the count exceeds the reserved slot space.
        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));

        m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));

        LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);
        LValue targetStart = addressFor(data->machineStart).value();

        LBasicBlock undefinedLoop = m_out.newBlock();
        LBasicBlock mainLoopEntry = m_out.newBlock();
        LBasicBlock mainLoop = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // First loop (entered only if mandatoryMinimum > length): counts down from
        // mandatoryMinimum, padding slots [length, mandatoryMinimum) with undefined.
        LValue lengthAsPtr = m_out.zeroExtPtr(length);
        LValue loopBoundValue = m_out.constIntPtr(data->mandatoryMinimum);
        ValueFromBlock loopBound = m_out.anchor(loopBoundValue);
        m_out.branch(
            m_out.above(loopBoundValue, lengthAsPtr), unsure(undefinedLoop), unsure(mainLoopEntry));

        LBasicBlock lastNext = m_out.appendTo(undefinedLoop, mainLoopEntry);
        LValue previousIndex = m_out.phi(pointerType(), loopBound);
        LValue currentIndex = m_out.sub(previousIndex, m_out.intPtrOne);
        m_out.store64(
            m_out.constInt64(JSValue::encode(jsUndefined())),
            m_out.baseIndex(m_heaps.variables, targetStart, currentIndex));
        ValueFromBlock nextIndex = m_out.anchor(currentIndex);
        m_out.addIncomingToPhi(previousIndex, nextIndex);
        m_out.branch(
            m_out.above(currentIndex, lengthAsPtr), unsure(undefinedLoop), unsure(mainLoopEntry));

        // Second loop: counts down from length, copying arguments [0, length)
        // from the source frame into the target slots.
        m_out.appendTo(mainLoopEntry, mainLoop);
        loopBound = m_out.anchor(lengthAsPtr);
        m_out.branch(m_out.notNull(lengthAsPtr), unsure(mainLoop), unsure(continuation));

        m_out.appendTo(mainLoop, continuation);
        previousIndex = m_out.phi(pointerType(), loopBound);
        currentIndex = m_out.sub(previousIndex, m_out.intPtrOne);
        LValue value = m_out.load64(
            m_out.baseIndex(m_heaps.variables, sourceStart, currentIndex));
        m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, currentIndex));
        nextIndex = m_out.anchor(currentIndex);
        m_out.addIncomingToPhi(previousIndex, nextIndex);
        m_out.branch(m_out.isNull(currentIndex), unsure(continuation), unsure(mainLoop));

        m_out.appendTo(continuation, lastNext);
    }
11029
11030 LValue getSpreadLengthFromInlineCallFrame(InlineCallFrame* inlineCallFrame, unsigned numberOfArgumentsToSkip)
11031 {
11032 ArgumentsLength argumentsLength = getArgumentsLength(inlineCallFrame);
11033 if (argumentsLength.isKnown) {
11034 unsigned knownLength = argumentsLength.known;
11035 if (knownLength >= numberOfArgumentsToSkip)
11036 knownLength = knownLength - numberOfArgumentsToSkip;
11037 else
11038 knownLength = 0;
11039 return m_out.constInt32(knownLength);
11040 }
11041
11042
11043 // We need to perform the same logical operation as the code above, but through dynamic operations.
11044 if (!numberOfArgumentsToSkip)
11045 return argumentsLength.value;
11046
11047 RELEASE_ASSERT(numberOfArgumentsToSkip < static_cast<unsigned>(INT32_MIN));
11048
11049 LValue fixedLength = m_out.sub(argumentsLength.value, m_out.constInt32(numberOfArgumentsToSkip));
11050
11051 return m_out.select(m_out.greaterThanOrEqual(fixedLength, m_out.int32Zero), fixedLength, m_out.int32Zero, SelectPredictability::Predictable);
11052 }
11053
    // Forwards varargs whose source is a phantom spread/array node, flattening the
    // spread structure directly into this frame's varargs slots.
    void compileForwardVarargsWithSpread()
    {
        Node* arguments = m_node->argumentsChild().node();
        RELEASE_ASSERT(arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread);

        LValue lengthIncludingThis = lowInt32(m_node->child1());

        LoadVarargsData* data = m_node->loadVarargsData();
        // OSR-exit if the count exceeds the reserved slot space.
        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));

        m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));

        LValue targetStart = addressFor(data->machineStart).value();

        // Walks the phantom node tree, emitting stores for each flattened element.
        // Takes the current store index and returns the index after the stores.
        auto forwardSpread = recursableLambda([this, &targetStart](auto self, Node* target, LValue storeIndex) -> LValue {
            // A spread just forwards its child's elements.
            if (target->op() == PhantomSpread)
                return self(target->child1().node(), storeIndex);

            // Array-with-spread: children flagged in the bit vector are spreads to
            // recurse into; the rest are plain values stored directly.
            if (target->op() == PhantomNewArrayWithSpread) {
                BitVector* bitVector = target->bitVector();
                for (unsigned i = 0; i < target->numChildren(); i++) {
                    if (bitVector->get(i))
                        storeIndex = self(m_graph.varArgChild(target, i).node(), storeIndex);
                    else {
                        LValue value = this->lowJSValue(m_graph.varArgChild(target, i));
                        m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, storeIndex));
                        storeIndex = m_out.add(m_out.constIntPtr(1), storeIndex);
                    }
                }
                return storeIndex;
            }

            // Constant array buffer: store each element as an encoded constant.
            if (target->op() == PhantomNewArrayBuffer) {
                auto* array = target->castOperand<JSImmutableButterfly*>();
                for (unsigned i = 0; i < array->length(); i++) {
                    // Because forwarded values are drained as JSValue, we should not generate value
                    // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
                    int64_t value = JSValue::encode(array->get(i));
                    m_out.store64(m_out.constInt64(value), m_out.baseIndex(m_heaps.variables, targetStart, storeIndex, JSValue(), Checked<int32_t>(sizeof(Register)) * i));
                }
                return m_out.add(m_out.constIntPtr(array->length()), storeIndex);
            }

            // Rest parameter: copy the residual arguments out of the inline frame
            // with an explicit load/store loop.
            RELEASE_ASSERT(target->op() == PhantomCreateRest);
            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();

            auto numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();
            LValue sourceStart = this->getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);
            LValue spreadLength = m_out.zeroExtPtr(getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip));

            LBasicBlock loop = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            ValueFromBlock startLoadIndex = m_out.anchor(m_out.constIntPtr(0));
            ValueFromBlock startStoreIndex = m_out.anchor(storeIndex);
            ValueFromBlock startStoreIndexForEnd = m_out.anchor(storeIndex);

            m_out.branch(m_out.isZero64(spreadLength), unsure(continuation), unsure(loop));

            LBasicBlock lastNext = m_out.appendTo(loop, continuation);
            LValue loopStoreIndex = m_out.phi(Int64, startStoreIndex);
            LValue loadIndex = m_out.phi(Int64, startLoadIndex);
            LValue value = m_out.load64(
                m_out.baseIndex(m_heaps.variables, sourceStart, loadIndex));
            m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, loopStoreIndex));
            LValue nextLoadIndex = m_out.add(m_out.constIntPtr(1), loadIndex);
            m_out.addIncomingToPhi(loadIndex, m_out.anchor(nextLoadIndex));
            LValue nextStoreIndex = m_out.add(m_out.constIntPtr(1), loopStoreIndex);
            m_out.addIncomingToPhi(loopStoreIndex, m_out.anchor(nextStoreIndex));
            ValueFromBlock loopStoreIndexForEnd = m_out.anchor(nextStoreIndex);
            m_out.branch(m_out.below(nextLoadIndex, spreadLength), unsure(loop), unsure(continuation));

            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int64, startStoreIndexForEnd, loopStoreIndexForEnd);
        });

        LValue storeIndex = forwardSpread(arguments, m_out.constIntPtr(0));

        // Pad remaining slots up to mandatoryMinimum with undefined.
        LBasicBlock undefinedLoop = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        ValueFromBlock startStoreIndex = m_out.anchor(storeIndex);
        LValue loopBoundValue = m_out.constIntPtr(data->mandatoryMinimum);
        m_out.branch(m_out.below(storeIndex, loopBoundValue),
            unsure(undefinedLoop), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(undefinedLoop, continuation);
        LValue loopStoreIndex = m_out.phi(Int64, startStoreIndex);
        m_out.store64(
            m_out.constInt64(JSValue::encode(jsUndefined())),
            m_out.baseIndex(m_heaps.variables, targetStart, loopStoreIndex));
        LValue nextIndex = m_out.add(loopStoreIndex, m_out.constIntPtr(1));
        m_out.addIncomingToPhi(loopStoreIndex, m_out.anchor(nextIndex));
        m_out.branch(
            m_out.below(nextIndex, loopBoundValue), unsure(undefinedLoop), unsure(continuation));

        m_out.appendTo(continuation, lastNext);
    }
11153
11154 void compileJump()
11155 {
11156 m_out.jump(lowBlock(m_node->targetBlock()));
11157 }
11158
11159 void compileBranch()
11160 {
11161 m_out.branch(
11162 boolify(m_node->child1()),
11163 WeightedTarget(
11164 lowBlock(m_node->branchData()->taken.block),
11165 m_node->branchData()->taken.count),
11166 WeightedTarget(
11167 lowBlock(m_node->branchData()->notTaken.block),
11168 m_node->branchData()->notTaken.count));
11169 }
11170
    // Lowers a DFG Switch node. Each switch kind normalizes its scrutinee to a
    // primitive value (int32, single character, string impl, or cell pointer) and
    // then hands off to buildSwitch / switchString.
    void compileSwitch()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        SwitchData* data = m_node->switchData();
        switch (data->kind) {
        case SwitchImm: {
            Vector<ValueFromBlock, 2> intValues;
            LBasicBlock switchOnInts = m_out.newBlock();

            LBasicBlock lastNext = m_out.appendTo(m_out.m_block, switchOnInts);

            switch (m_node->child1().useKind()) {
            case Int32Use: {
                // Already an int32: switch on it directly.
                intValues.append(m_out.anchor(lowInt32(m_node->child1())));
                m_out.jump(switchOnInts);
                break;
            }

            case UntypedUse: {
                // Boxed value: accept int32s, and doubles that round-trip exactly
                // through int32; everything else goes to the fall-through block.
                LBasicBlock isInt = m_out.newBlock();
                LBasicBlock isNotInt = m_out.newBlock();
                LBasicBlock isDouble = m_out.newBlock();

                LValue boxedValue = lowJSValue(m_node->child1());
                m_out.branch(isNotInt32(boxedValue), unsure(isNotInt), unsure(isInt));

                LBasicBlock innerLastNext = m_out.appendTo(isInt, isNotInt);

                intValues.append(m_out.anchor(unboxInt32(boxedValue)));
                m_out.jump(switchOnInts);

                // Cells, misc values, and BigInt32s can never match an immediate case.
                m_out.appendTo(isNotInt, isDouble);
                m_out.branch(
                    isCellOrMiscOrBigInt32(boxedValue, provenType(m_node->child1())),
                    usually(lowBlock(data->fallThrough.block)), rarely(isDouble));

                // A double matches only if converting to int32 and back is lossless.
                m_out.appendTo(isDouble, innerLastNext);
                LValue doubleValue = unboxDouble(boxedValue);
                LValue intInDouble = m_out.doubleToInt(doubleValue);
                intValues.append(m_out.anchor(intInDouble));
                m_out.branch(
                    m_out.doubleEqual(m_out.intToDouble(intInDouble), doubleValue),
                    unsure(switchOnInts), unsure(lowBlock(data->fallThrough.block)));
                break;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                break;
            }

            m_out.appendTo(switchOnInts, lastNext);
            buildSwitch(data, Int32, m_out.phi(Int32, intValues));
            return;
        }

        case SwitchChar: {
            LValue stringValue;

            // FIXME: We should use something other than unsure() for the branch weight
            // of the fallThrough block. The main challenge is just that we have multiple
            // branches to fallThrough but a single count, so we would need to divvy it up
            // among the different lowered branches.
            // https://bugs.webkit.org/show_bug.cgi?id=129082

            switch (m_node->child1().useKind()) {
            case StringUse: {
                stringValue = lowString(m_node->child1());
                break;
            }

            case UntypedUse: {
                // Boxed value: only string cells can match; anything else falls through.
                LValue unboxedValue = lowJSValue(m_node->child1());

                LBasicBlock isCellCase = m_out.newBlock();
                LBasicBlock isStringCase = m_out.newBlock();

                m_out.branch(
                    isNotCell(unboxedValue, provenType(m_node->child1())),
                    unsure(lowBlock(data->fallThrough.block)), unsure(isCellCase));

                LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
                LValue cellValue = unboxedValue;
                m_out.branch(
                    isNotString(cellValue, provenType(m_node->child1())),
                    unsure(lowBlock(data->fallThrough.block)), unsure(isStringCase));

                m_out.appendTo(isStringCase, lastNext);
                stringValue = cellValue;
                break;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                break;
            }

            LBasicBlock lengthIs1 = m_out.newBlock();
            LBasicBlock needResolution = m_out.newBlock();
            LBasicBlock resolved = m_out.newBlock();
            LBasicBlock is8Bit = m_out.newBlock();
            LBasicBlock is16Bit = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Rope strings have no flat character buffer; resolve them via the
            // runtime on the (rare) slow path.
            ValueFromBlock fastValue = m_out.anchor(m_out.loadPtr(stringValue, m_heaps.JSString_value));
            m_out.branch(
                isRopeString(stringValue, m_node->child1()),
                rarely(needResolution), usually(resolved));

            LBasicBlock lastNext = m_out.appendTo(needResolution, resolved);
            ValueFromBlock slowValue = m_out.anchor(
                vmCall(pointerType(), operationResolveRope, weakPointer(globalObject), stringValue));
            m_out.jump(resolved);

            // Only single-character strings can match a char case.
            m_out.appendTo(resolved, lengthIs1);
            LValue value = m_out.phi(pointerType(), fastValue, slowValue);
            m_out.branch(
                m_out.notEqual(
                    m_out.load32NonNegative(value, m_heaps.StringImpl_length),
                    m_out.int32One),
                unsure(lowBlock(data->fallThrough.block)), unsure(lengthIs1));

            // Load the one character, dispatching on the 8-bit/16-bit storage flag.
            m_out.appendTo(lengthIs1, is8Bit);
            LValue characterData = m_out.loadPtr(value, m_heaps.StringImpl_data);
            m_out.branch(
                m_out.testNonZero32(
                    m_out.load32(value, m_heaps.StringImpl_hashAndFlags),
                    m_out.constInt32(StringImpl::flagIs8Bit())),
                unsure(is8Bit), unsure(is16Bit));

            Vector<ValueFromBlock, 2> characters;
            m_out.appendTo(is8Bit, is16Bit);
            characters.append(m_out.anchor(m_out.load8ZeroExt32(characterData, m_heaps.characters8[0])));
            m_out.jump(continuation);

            m_out.appendTo(is16Bit, continuation);
            characters.append(m_out.anchor(m_out.load16ZeroExt32(characterData, m_heaps.characters16[0])));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            buildSwitch(data, Int32, m_out.phi(Int32, characters));
            return;
        }

        case SwitchString: {
            switch (m_node->child1().useKind()) {
            case StringIdentUse: {
                // Atomized strings: compare StringImpl pointers directly.
                LValue stringImpl = lowStringIdent(m_node->child1());

                Vector<SwitchCase> cases;
                for (unsigned i = 0; i < data->cases.size(); ++i) {
                    LValue value = m_out.constIntPtr(data->cases[i].value.stringImpl());
                    LBasicBlock block = lowBlock(data->cases[i].target.block);
                    Weight weight = Weight(data->cases[i].target.count);
                    cases.append(SwitchCase(value, block, weight));
                }

                m_out.switchInstruction(
                    stringImpl, cases, lowBlock(data->fallThrough.block),
                    Weight(data->fallThrough.count));
                return;
            }

            case StringUse: {
                switchString(data, lowString(m_node->child1()), m_node->child1());
                return;
            }

            case UntypedUse: {
                // Boxed value: only string cells can match; non-cells and
                // non-strings fall through.
                LValue value = lowJSValue(m_node->child1());

                LBasicBlock isCellBlock = m_out.newBlock();
                LBasicBlock isStringBlock = m_out.newBlock();

                m_out.branch(
                    isCell(value, provenType(m_node->child1())),
                    unsure(isCellBlock), unsure(lowBlock(data->fallThrough.block)));

                LBasicBlock lastNext = m_out.appendTo(isCellBlock, isStringBlock);

                m_out.branch(
                    isString(value, provenType(m_node->child1())),
                    unsure(isStringBlock), unsure(lowBlock(data->fallThrough.block)));

                m_out.appendTo(isStringBlock, lastNext);

                switchString(data, value, m_node->child1());
                return;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                return;
            }
            return;
        }

        case SwitchCell: {
            // Switch on the cell pointer itself; non-cells fall through.
            LValue cell;
            switch (m_node->child1().useKind()) {
            case CellUse: {
                cell = lowCell(m_node->child1());
                break;
            }

            case UntypedUse: {
                LValue value = lowJSValue(m_node->child1());
                LBasicBlock cellCase = m_out.newBlock();
                m_out.branch(
                    isCell(value, provenType(m_node->child1())),
                    unsure(cellCase), unsure(lowBlock(data->fallThrough.block)));
                m_out.appendTo(cellCase);
                cell = value;
                break;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                return;
            }

            buildSwitch(m_node->switchData(), pointerType(), cell);
            return;
        } }

        DFG_CRASH(m_graph, m_node, "Bad switch kind");
    }
11398
11399 void compileEntrySwitch()
11400 {
11401 Vector<LBasicBlock> successors;
11402 for (DFG::BasicBlock* successor : m_node->entrySwitchData()->cases)
11403 successors.append(lowBlock(successor));
11404 m_out.entrySwitch(successors);
11405 }
11406
11407 void compileReturn()
11408 {
11409 m_out.ret(lowJSValue(m_node->child1()));
11410 }
11411
    void compileForceOSRExit()
    {
        // Unconditionally terminate compilation of this path with an OSR exit.
        terminate(InadequateCoverage);
    }
11416
    // Lowers CPU test intrinsics by emitting the raw instruction through a
    // patchpoint. Only implemented for x86-64; on other CPUs the whole body is
    // compiled out and nothing is emitted.
    void compileCPUIntrinsic()
    {
#if CPU(X86_64)
        Intrinsic intrinsic = m_node->intrinsic();
        switch (intrinsic) {
        case CPUMfenceIntrinsic:
        case CPUCpuidIntrinsic:
        case CPUPauseIntrinsic: {
            // These produce no meaningful value; the node's result is undefined.
            PatchpointValue* patchpoint = m_out.patchpoint(Void);
            patchpoint->effects = Effects::forCall();
            // cpuid writes eax/ebx/ecx/edx; tell B3 so it spills around it.
            if (intrinsic == CPUCpuidIntrinsic)
                patchpoint->clobber(RegisterSet { X86Registers::eax, X86Registers::ebx, X86Registers::ecx, X86Registers::edx });

            patchpoint->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) {
                switch (intrinsic) {
                case CPUMfenceIntrinsic:
                    jit.mfence();
                    break;
                case CPUCpuidIntrinsic:
                    jit.cpuid();
                    break;
                case CPUPauseIntrinsic:
                    jit.pause();
                    break;
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                }
            });
            setJSValue(m_out.constInt64(JSValue::encode(jsUndefined())));
            break;
        }
        case CPURdtscIntrinsic: {
            PatchpointValue* patchpoint = m_out.patchpoint(Int32);
            patchpoint->effects = Effects::forCall();
            patchpoint->clobber(RegisterSet { X86Registers::eax, X86Registers::edx });
            // The low 32-bits of rdtsc go into rax.
            patchpoint->resultConstraints = { ValueRep::reg(X86Registers::eax) };
            patchpoint->setGenerator( [=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) {
                jit.rdtsc();
            });
            setJSValue(boxInt32(patchpoint));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();

        }
#endif
    }
11466
11467 void compileThrow()
11468 {
11469 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
11470 LValue error = lowJSValue(m_node->child1());
11471 vmCall(Void, operationThrowDFG, weakPointer(globalObject), error);
11472 // vmCall() does an exception check so we should never reach this.
11473 m_out.unreachable();
11474 }
11475
11476 void compileThrowStaticError()
11477 {
11478 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
11479 LValue errorMessage = lowString(m_node->child1());
11480 LValue errorType = m_out.constInt32(m_node->errorType());
11481 vmCall(Void, operationThrowStaticError, weakPointer(globalObject), errorMessage, errorType);
11482 // vmCall() does an exception check so we should never reach this.
11483 m_out.unreachable();
11484 }
11485
    // Emits a watchpoint label that can later be overwritten with a jump to an
    // OSR exit (a "jump replacement") when the compiled code is invalidated.
    // Falls through as a no-op until that happens.
    void compileInvalidationPoint()
    {
        if (verboseCompilationEnabled())
            dataLog("    Invalidation point with availability: ", availabilityMap(), "\n");

        DFG_ASSERT(m_graph, m_node, m_origin.exitOK);

        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        OSRExitDescriptor* descriptor = appendOSRExitDescriptor(noValue(), nullptr);
        NodeOrigin origin = m_origin;
        patchpoint->appendColdAnys(buildExitArguments(descriptor, origin.forExit, noValue()));

        State* state = &m_ftlState;

        auto nodeIndex = m_nodeIndexInGraph;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
                // The MacroAssembler knows more about this than B3 does. The watchpointLabel() method
                // will ensure that this is followed by a nop shadow but only when this is actually
                // necessary.
                CCallHelpers::Label label = jit.watchpointLabel();

                RefPtr<OSRExitHandle> handle = descriptor->emitOSRExitLater(
                    *state, UncountableInvalidation, origin, params, nodeIndex, 0);

                RefPtr<JITCode> jitCode = state->jitCode.get();

                // Once addresses are known, register the (label -> exit) pair so
                // invalidation can patch the label into a jump to the exit.
                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        JumpReplacement jumpReplacement(
                            linkBuffer.locationOf<JSInternalPtrTag>(label),
                            linkBuffer.locationOf<OSRExitPtrTag>(handle->label));
                        jitCode->common.m_jumpReplacements.append(jumpReplacement);
                    });
            });

        // Set some obvious things.
        patchpoint->effects.terminal = false;
        patchpoint->effects.writesLocalState = false;
        patchpoint->effects.readsLocalState = false;

        // This is how we tell B3 about the possibility of jump replacement.
        patchpoint->effects.exitsSideways = true;

        // It's not possible for some prior branch to determine the safety of this operation. It's always
        // fine to execute this on some path that wouldn't have originally executed it before
        // optimization.
        patchpoint->effects.controlDependent = false;

        // If this falls through then it won't write anything.
        patchpoint->effects.writes = HeapRange();

        // When this abruptly terminates, it could read any heap location.
        patchpoint->effects.reads = HeapRange::top();
    }
11541
11542 void compileIsEmpty()
11543 {
11544 setBoolean(m_out.isZero64(lowJSValue(m_node->child1())));
11545 }
11546
11547 void compileTypeOfIsUndefined()
11548 {
11549 setBoolean(equalNullOrUndefined(m_node->child1(), AllCellsAreFalse, EqualUndefined));
11550 }
11551
11552 void compileIsUndefinedOrNull()
11553 {
11554 setBoolean(isOther(lowJSValue(m_node->child1()), provenType(m_node->child1())));
11555 }
11556
11557 void compileIsBoolean()
11558 {
11559 setBoolean(isBoolean(lowJSValue(m_node->child1()), provenType(m_node->child1())));
11560 }
11561
11562 void compileIsNumber()
11563 {
11564 setBoolean(isNumber(lowJSValue(m_node->child1()), provenType(m_node->child1())));
11565 }
11566
    // Number.isInteger: int32s are trivially integers; non-numbers are not; for
    // doubles, reject NaN/Infinity and then check the value survives truncation.
    void compileNumberIsInteger()
    {
        LBasicBlock notInt32 = m_out.newBlock();
        LBasicBlock doubleCase = m_out.newBlock();
        LBasicBlock doubleNotNanOrInf = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue input = lowJSValue(m_node->child1());

        // Boxed int32 => true.
        ValueFromBlock trueResult = m_out.anchor(m_out.booleanTrue);
        m_out.branch(
            isInt32(input, provenType(m_node->child1())), unsure(continuation), unsure(notInt32));

        // Not a number at all => false. Note this anchor also covers the
        // NaN/Infinity branch below: the upsilon writing false executes before
        // doubleCase, so jumping straight to the continuation yields false.
        LBasicBlock lastNext = m_out.appendTo(notInt32, doubleCase);
        ValueFromBlock falseResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isNotNumber(input, provenType(m_node->child1())), unsure(continuation), unsure(doubleCase));

        // Extract the 11 exponent bits (bits 52-62); all-ones (0x7ff) means the
        // double is NaN or +/-Infinity.
        m_out.appendTo(doubleCase, doubleNotNanOrInf);
        LValue doubleAsInt;
        LValue asDouble = unboxDouble(input, &doubleAsInt);
        LValue expBits = m_out.bitAnd(m_out.lShr(doubleAsInt, m_out.constInt32(52)), m_out.constInt64(0x7ff));
        m_out.branch(
            m_out.equal(expBits, m_out.constInt64(0x7ff)),
            unsure(continuation), unsure(doubleNotNanOrInf));

        // Finite double: it is an integer iff truncating toward zero leaves it
        // unchanged. Done in a patchpoint so we can use the machine round+compare.
        m_out.appendTo(doubleNotNanOrInf, continuation);
        PatchpointValue* patchpoint = m_out.patchpoint(Int32);
        patchpoint->appendSomeRegister(asDouble);
        patchpoint->numFPScratchRegisters = 1;
        patchpoint->effects = Effects::none();
        patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            GPRReg result = params[0].gpr();
            FPRReg input = params[1].fpr();
            FPRReg temp = params.fpScratch(0);
            jit.roundTowardZeroDouble(input, temp);
            jit.compareDouble(MacroAssembler::DoubleEqualAndOrdered, input, temp, result);
        });
        ValueFromBlock patchpointResult = m_out.anchor(patchpoint);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, trueResult, falseResult, patchpointResult));
    }
11611
11612#if USE(BIGINT32)
    // IsBigInt with BigInt32 support: a BigInt is either an immediate BigInt32
    // (non-cell) or a heap-allocated cell of the queried type.
    void compileIsBigInt()
    {
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock isNotCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(isNotCellCase));

        // Non-cell path: check for the BigInt32 immediate encoding.
        LBasicBlock lastNext = m_out.appendTo(isNotCellCase, isCellCase);
        // FIXME: we should filter the provenType to include the fact that we know we are not dealing with a cell
        ValueFromBlock notCellResult = m_out.anchor(isBigInt32(value, provenType(m_node->child1())));
        m_out.jump(continuation);

        // Cell path: check the cell's JSType against the node's queried type
        // (presumably HeapBigIntType here — TODO(review): confirm against callers).
        m_out.appendTo(isCellCase, continuation);
        ValueFromBlock cellResult = m_out.anchor(isCellWithType(value, m_node->queriedType(), m_node->speculatedTypeForQuery(), provenType(m_node->child1())));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, notCellResult, cellResult));
    }
11635#else // if !USE(BIGINT32)
    // Without BigInt32, IsBigInt should never reach FTL lowering: the frontend
    // emits IsCellWithType(HeapBigInt) instead, so hitting this is a compiler bug.
    NO_RETURN_DUE_TO_CRASH ALWAYS_INLINE void compileIsBigInt()
    {
        // If we are not dealing with BigInt32, we should just emit IsCellWithType(HeapBigInt) instead.
        RELEASE_ASSERT_NOT_REACHED();
    }
11641#endif
    // IsCellWithType: tests whether the operand is a cell whose JSType matches
    // the node's queried type. UntypedUse needs a non-cell guard (false for
    // non-cells); CellUse can test the type directly.
    void compileIsCellWithType()
    {
        if (m_node->child1().useKind() == UntypedUse) {
            LValue value = lowJSValue(m_node->child1());

            LBasicBlock isCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Non-cells can never match: anchor false before branching away.
            ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
            m_out.branch(
                isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));

            LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
            ValueFromBlock cellResult = m_out.anchor(isCellWithType(value, m_node->queriedType(), m_node->speculatedTypeForQuery(), provenType(m_node->child1())));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, notCellResult, cellResult));
        } else {
            // Proven cell: no guard needed.
            ASSERT(m_node->child1().useKind() == CellUse);
            setBoolean(isCellWithType(lowCell(m_node->child1()), m_node->queriedType(), m_node->speculatedTypeForQuery(), provenType(m_node->child1())));
        }
    }
11665
    // IsObject: false for non-cells; otherwise delegate to the isObject()
    // type check on the cell.
    void compileIsObject()
    {
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Non-cell values (numbers, booleans, null/undefined) are never objects.
        ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
        ValueFromBlock cellResult = m_out.anchor(isObject(value, provenType(m_node->child1())));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, notCellResult, cellResult));
    }
11684
11685 LValue wangsInt64Hash(LValue input)
11686 {
11687 // key += ~(key << 32);
11688 LValue key = input;
11689 LValue temp = key;
11690 temp = m_out.shl(temp, m_out.constInt32(32));
11691 temp = m_out.bitNot(temp);
11692 key = m_out.add(key, temp);
11693 // key ^= (key >> 22);
11694 temp = key;
11695 temp = m_out.lShr(temp, m_out.constInt32(22));
11696 key = m_out.bitXor(key, temp);
11697 // key += ~(key << 13);
11698 temp = key;
11699 temp = m_out.shl(temp, m_out.constInt32(13));
11700 temp = m_out.bitNot(temp);
11701 key = m_out.add(key, temp);
11702 // key ^= (key >> 8);
11703 temp = key;
11704 temp = m_out.lShr(temp, m_out.constInt32(8));
11705 key = m_out.bitXor(key, temp);
11706 // key += (key << 3);
11707 temp = key;
11708 temp = m_out.shl(temp, m_out.constInt32(3));
11709 key = m_out.add(key, temp);
11710 // key ^= (key >> 15);
11711 temp = key;
11712 temp = m_out.lShr(temp, m_out.constInt32(15));
11713 key = m_out.bitXor(key, temp);
11714 // key += ~(key << 27);
11715 temp = key;
11716 temp = m_out.shl(temp, m_out.constInt32(27));
11717 temp = m_out.bitNot(temp);
11718 key = m_out.add(key, temp);
11719 // key ^= (key >> 31);
11720 temp = key;
11721 temp = m_out.lShr(temp, m_out.constInt32(31));
11722 key = m_out.bitXor(key, temp);
11723 key = m_out.castToInt32(key);
11724
11725 return key;
11726 }
11727
    // Computes the Map/Set hash of a JSString. Rope strings (no resolved
    // StringImpl) and strings whose hash has not been computed yet (stored
    // hash of 0) take the slow C call; otherwise the cached StringImpl hash
    // is used directly.
    LValue mapHashString(LValue string, Edge& edge)
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        LBasicBlock nonEmptyStringCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isRopeString(string, edge), rarely(slowCase), usually(nonEmptyStringCase));

        // Resolved string: the hash lives in the StringImpl's hashAndFlags word,
        // above the flag bits.
        LBasicBlock lastNext = m_out.appendTo(nonEmptyStringCase, slowCase);
        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
        LValue hash = m_out.lShr(m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));
        ValueFromBlock nonEmptyStringHashResult = m_out.anchor(hash);
        // A stored hash of 0 means "not computed yet" — fall back to the slow path.
        m_out.branch(m_out.equal(hash, m_out.constInt32(0)),
            unsure(slowCase), unsure(continuation));

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(m_out.castToInt32(vmCall(Int64, operationMapHash, weakPointer(globalObject), string)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, slowResult, nonEmptyStringHashResult);
    }
11751
    // MapHash: computes the hash used by Map/Set for the operand.
    //   - Immediate-comparable kinds (boolean, int32, symbol, object, BigInt32)
    //     hash the raw 64-bit encoding with Wang's hash.
    //   - HeapBigInt and strings need content-based hashing via C calls / the
    //     cached StringImpl hash.
    //   - CellUse and UntypedUse dispatch dynamically on the cell's type.
    void compileMapHash()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        switch (m_node->child1().useKind()) {
#if USE(BIGINT32)
        case BigInt32Use:
#endif
        case BooleanUse:
        case Int32Use:
        case SymbolUse:
        case ObjectUse: {
            // These kinds compare by encoded bits, so hashing the bits suffices.
            LValue key = lowJSValue(m_node->child1(), ManualOperandSpeculation);
            speculate(m_node->child1());
            setInt32(wangsInt64Hash(key));
            return;
        }

        case HeapBigIntUse: {
            // Heap BigInts compare by value, so the hash must come from the contents.
            LValue key = lowHeapBigInt(m_node->child1());
            setInt32(m_out.castToInt32(vmCall(Int64, operationMapHashHeapBigInt, m_vmValue, key)));
            return;
        }

        case CellUse: {
            // Known cell, unknown type: strings and heap BigInts hash by content,
            // everything else hashes by pointer bits.
            LBasicBlock isString = m_out.newBlock();
            LBasicBlock notString = m_out.newBlock();
            LBasicBlock isHeapBigIntCase = m_out.newBlock();
            LBasicBlock notStringNorHeapBigIntCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue value = lowCell(m_node->child1());
            LValue isStringValue = m_out.equal(m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoType), m_out.constInt32(StringType));
            m_out.branch(
                isStringValue, unsure(isString), unsure(notString));

            LBasicBlock lastNext = m_out.appendTo(isString, notString);
            ValueFromBlock stringResult = m_out.anchor(mapHashString(value, m_node->child1()));
            m_out.jump(continuation);

            m_out.appendTo(notString, isHeapBigIntCase);
            m_out.branch(isHeapBigInt(value, (provenType(m_node->child1()) & ~SpecString)), unsure(isHeapBigIntCase), unsure(notStringNorHeapBigIntCase));

            m_out.appendTo(isHeapBigIntCase, notStringNorHeapBigIntCase);
            ValueFromBlock heapBigIntResult = m_out.anchor(m_out.castToInt32(vmCall(Int64, operationMapHashHeapBigInt, m_vmValue, value)));
            m_out.jump(continuation);

            m_out.appendTo(notStringNorHeapBigIntCase, continuation);
            ValueFromBlock notStringResult = m_out.anchor(wangsInt64Hash(value));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setInt32(m_out.phi(Int32, stringResult, heapBigIntResult, notStringResult));
            return;
        }

        case StringUse: {
            LValue string = lowString(m_node->child1());
            setInt32(mapHashString(string, m_node->child1()));
            return;
        }

        default:
            RELEASE_ASSERT(m_node->child1().useKind() == UntypedUse);
            break;
        }

        // UntypedUse: fully dynamic dispatch. Non-cells take the straight
        // bit-hash; cells are split into string / heap BigInt / other.
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock straightHash = m_out.newBlock();
        LBasicBlock isStringCase = m_out.newBlock();
        LBasicBlock notStringCase = m_out.newBlock();
        LBasicBlock nonEmptyStringCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(straightHash));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, notStringCase);
        LValue isString = m_out.equal(m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoType), m_out.constInt32(StringType));
        m_out.branch(
            isString, unsure(isStringCase), unsure(notStringCase));

        // Non-string cells: heap BigInts go slow (content hash), others get the bit-hash.
        m_out.appendTo(notStringCase, isStringCase);
        m_out.branch(isHeapBigInt(value, (provenType(m_node->child1()) & ~SpecString)), unsure(slowCase), unsure(straightHash));

        // Strings: ropes have no StringImpl, so they go slow.
        m_out.appendTo(isStringCase, nonEmptyStringCase);
        m_out.branch(isRopeString(value, m_node->child1()), rarely(slowCase), usually(nonEmptyStringCase));

        // Resolved string: use the cached StringImpl hash; 0 means "not computed".
        m_out.appendTo(nonEmptyStringCase, straightHash);
        LValue stringImpl = m_out.loadPtr(value, m_heaps.JSString_value);
        LValue hash = m_out.lShr(m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));
        ValueFromBlock nonEmptyStringHashResult = m_out.anchor(hash);
        m_out.branch(m_out.equal(hash, m_out.constInt32(0)),
            unsure(slowCase), unsure(continuation));

        m_out.appendTo(straightHash, slowCase);
        ValueFromBlock fastResult = m_out.anchor(wangsInt64Hash(value));
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(m_out.castToInt32(vmCall(Int64, operationMapHash, weakPointer(globalObject), value)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setInt32(m_out.phi(Int32, fastResult, slowResult, nonEmptyStringHashResult));
    }
11860
    // NormalizeMapKey: canonicalizes a value before it is used as a Map/Set key
    // so that keys that compare equal hash identically:
    //   - heap BigInts may be normalized via a C call (e.g. to a canonical form);
    //   - NaN is replaced with the canonical (pure) NaN encoding;
    //   - doubles with an exact int32 value are re-boxed as int32;
    //   - everything else passes through unchanged.
    void compileNormalizeMapKey()
    {
        ASSERT(m_node->child1().useKind() == UntypedUse);

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock notCellCase = m_out.newBlock();
        LBasicBlock isHeapBigIntCase = m_out.newBlock();
        LBasicBlock isNumberCase = m_out.newBlock();
        LBasicBlock notInt32NumberCase = m_out.newBlock();
        LBasicBlock notNaNCase = m_out.newBlock();
        LBasicBlock convertibleCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(isCellCase);

        LValue key = lowJSValue(m_node->child1());
        // Default result: the key unchanged.
        ValueFromBlock fastResult = m_out.anchor(key);
        m_out.branch(isNotCell(key, provenType(m_node->child1())), unsure(notCellCase), unsure(isCellCase));

        // Cells other than heap BigInts need no normalization.
        m_out.appendTo(isCellCase, isHeapBigIntCase);
        m_out.branch(isNotHeapBigInt(key, (provenType(m_node->child1()) & SpecCellCheck)), unsure(continuation), unsure(isHeapBigIntCase));

        m_out.appendTo(isHeapBigIntCase, notCellCase);
        ValueFromBlock bigIntResult = m_out.anchor(vmCall(Int64, operationNormalizeMapKeyHeapBigInt, m_vmValue, key));
        m_out.jump(continuation);

        // Non-cells: only numbers need normalization.
        m_out.appendTo(notCellCase, isNumberCase);
        m_out.branch(isNotNumber(key), unsure(continuation), unsure(isNumberCase));

        // Int32-encoded numbers are already canonical.
        m_out.appendTo(isNumberCase, notInt32NumberCase);
        m_out.branch(isInt32(key), unsure(continuation), unsure(notInt32NumberCase));

        // Double: NaN (x != x) canonicalizes to the pure-NaN encoding.
        m_out.appendTo(notInt32NumberCase, notNaNCase);
        LValue doubleValue = unboxDouble(key);
        ValueFromBlock normalizedNaNResult = m_out.anchor(m_out.constInt64(JSValue::encode(jsNaN())));
        m_out.branch(m_out.doubleNotEqualOrUnordered(doubleValue, doubleValue), unsure(continuation), unsure(notNaNCase));

        // Non-NaN double: if truncating to int and back changes the value, the
        // double is not int-representable and stays as-is.
        m_out.appendTo(notNaNCase, convertibleCase);
        LValue integerValue = m_out.doubleToInt(doubleValue);
        LValue integerValueConvertedToDouble = m_out.intToDouble(integerValue);
        ValueFromBlock doubleResult = m_out.anchor(key);
        m_out.branch(m_out.doubleNotEqualOrUnordered(doubleValue, integerValueConvertedToDouble), unsure(continuation), unsure(convertibleCase));

        // Exact int32 value: re-box as int32 so it unifies with int keys.
        m_out.appendTo(convertibleCase, continuation);
        ValueFromBlock boxedIntResult = m_out.anchor(boxInt32(integerValue));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, bigIntResult, normalizedNaNResult, doubleResult, boxedIntResult));
    }
11911
    // GetMapBucket: inline open-addressed probe of a JSMap/JSSet hash table.
    // children: (map-or-set, key, precomputed hash). Probes linearly from
    // hash & mask; empty bucket => sentinel "not present" bucket, deleted
    // bucket => keep probing, matching key (Object.is-style, per use kind)
    // => return the bucket. Possible-content-equal cases (strings, heap
    // BigInts) defer to the C slow path.
    void compileGetMapBucket()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        LBasicBlock loopStart = m_out.newBlock();
        LBasicBlock loopAround = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock notPresentInTable = m_out.newBlock();
        LBasicBlock notEmptyValue = m_out.newBlock();
        LBasicBlock notDeletedValue = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopStart);

        LValue map;
        if (m_node->child1().useKind() == MapObjectUse)
            map = lowMapObject(m_node->child1());
        else if (m_node->child1().useKind() == SetObjectUse)
            map = lowSetObject(m_node->child1());
        else
            RELEASE_ASSERT_NOT_REACHED();

        LValue key = lowJSValue(m_node->child2(), ManualOperandSpeculation);
        if (m_node->child2().useKind() != UntypedUse)
            speculate(m_node->child2());

        LValue hash = lowInt32(m_node->child3());

        LValue buffer = m_out.loadPtr(map, m_heaps.HashMapImpl_buffer);
        // Capacity is a power of two, so capacity - 1 is the index mask —
        // TODO(review): confirm HashMapImpl guarantees power-of-two capacity.
        LValue mask = m_out.sub(m_out.load32(map, m_heaps.HashMapImpl_capacity), m_out.int32One);

        ValueFromBlock indexStart = m_out.anchor(hash);
        m_out.jump(loopStart);

        // Probe loop: load the bucket pointer at (index & mask).
        m_out.appendTo(loopStart, notEmptyValue);
        LValue unmaskedIndex = m_out.phi(Int32, indexStart);
        LValue index = m_out.bitAnd(mask, unmaskedIndex);
        // FIXME: I think these buffers are caged?
        // https://bugs.webkit.org/show_bug.cgi?id=174925
        LValue hashMapBucket = m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), buffer, m_out.zeroExt(index, Int64), ScaleEight));
        ValueFromBlock bucketResult = m_out.anchor(hashMapBucket);
        // Empty bucket terminates the probe: the key is not in the table.
        m_out.branch(m_out.equal(hashMapBucket, m_out.constIntPtr(bitwise_cast<intptr_t>(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::emptyValue()))),
            unsure(notPresentInTable), unsure(notEmptyValue));

        // Deleted buckets are tombstones: skip and keep probing.
        m_out.appendTo(notEmptyValue, notDeletedValue);
        m_out.branch(m_out.equal(hashMapBucket, m_out.constIntPtr(bitwise_cast<intptr_t>(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::deletedValue()))),
            unsure(loopAround), unsure(notDeletedValue));

        m_out.appendTo(notDeletedValue, loopAround);
        LValue bucketKey = m_out.load64(hashMapBucket, m_heaps.HashMapBucket_key);

        // Perform Object.is()
        switch (m_node->child2().useKind()) {
        case BooleanUse:
#if USE(BIGINT32)
        case BigInt32Use:
#endif
        case Int32Use:
        case SymbolUse:
        case ObjectUse: {
            // These kinds compare by encoded bits only.
            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(loopAround));
            break;
        }
        case StringUse: {
            // Bit equality wins fast; otherwise a string-typed bucket key may
            // still be content-equal, so defer to the slow path.
            LBasicBlock notBitEqual = m_out.newBlock();
            LBasicBlock bucketKeyIsCell = m_out.newBlock();

            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(notBitEqual));

            m_out.appendTo(notBitEqual, bucketKeyIsCell);
            m_out.branch(isCell(bucketKey),
                unsure(bucketKeyIsCell), unsure(loopAround));

            m_out.appendTo(bucketKeyIsCell, loopAround);
            m_out.branch(isString(bucketKey),
                unsure(slowPath), unsure(loopAround));
            break;
        }
        case HeapBigIntUse: {
            // Same shape as StringUse: heap BigInts compare by value.
            LBasicBlock notBitEqual = m_out.newBlock();
            LBasicBlock bucketKeyIsCell = m_out.newBlock();

            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(notBitEqual));

            m_out.appendTo(notBitEqual, bucketKeyIsCell);
            m_out.branch(isCell(bucketKey),
                unsure(bucketKeyIsCell), unsure(loopAround));

            m_out.appendTo(bucketKeyIsCell, loopAround);
            m_out.branch(isHeapBigInt(bucketKey),
                unsure(slowPath), unsure(loopAround));
            break;
        }
        case CellUse: {
            // Key is some cell: only string/string or heap-BigInt/heap-BigInt
            // pairs can be content-equal without being bit-equal.
            LBasicBlock notBitEqual = m_out.newBlock();
            LBasicBlock bucketKeyIsCell = m_out.newBlock();
            LBasicBlock bucketKeyIsString = m_out.newBlock();
            LBasicBlock bucketKeyIsNotString = m_out.newBlock();
            LBasicBlock bucketKeyIsHeapBigInt = m_out.newBlock();

            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(notBitEqual));

            m_out.appendTo(notBitEqual, bucketKeyIsCell);
            m_out.branch(isCell(bucketKey),
                unsure(bucketKeyIsCell), unsure(loopAround));

            m_out.appendTo(bucketKeyIsCell, bucketKeyIsString);
            m_out.branch(isString(bucketKey),
                unsure(bucketKeyIsString), unsure(bucketKeyIsNotString));

            m_out.appendTo(bucketKeyIsString, bucketKeyIsNotString);
            m_out.branch(isString(key, provenType(m_node->child2())),
                unsure(slowPath), unsure(loopAround));

            m_out.appendTo(bucketKeyIsNotString, bucketKeyIsHeapBigInt);
            m_out.branch(isHeapBigInt(bucketKey),
                unsure(bucketKeyIsHeapBigInt), unsure(loopAround));

            m_out.appendTo(bucketKeyIsHeapBigInt, loopAround);
            m_out.branch(isHeapBigInt(key, provenType(m_node->child2())),
                unsure(slowPath), unsure(loopAround));
            break;
        }
        case UntypedUse: {
            // Fully dynamic: additionally guard that the key itself is a cell
            // before considering content equality.
            LBasicBlock notBitEqual = m_out.newBlock();
            LBasicBlock bucketKeyIsCell = m_out.newBlock();
            LBasicBlock bothAreCells = m_out.newBlock();
            LBasicBlock bucketKeyIsString = m_out.newBlock();
            LBasicBlock bucketKeyIsNotString = m_out.newBlock();
            LBasicBlock bucketKeyIsHeapBigInt = m_out.newBlock();

            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(notBitEqual));

            m_out.appendTo(notBitEqual, bucketKeyIsCell);
            m_out.branch(isCell(bucketKey),
                unsure(bucketKeyIsCell), unsure(loopAround));

            m_out.appendTo(bucketKeyIsCell, bothAreCells);
            m_out.branch(isCell(key),
                unsure(bothAreCells), unsure(loopAround));

            m_out.appendTo(bothAreCells, bucketKeyIsString);
            m_out.branch(isString(bucketKey),
                unsure(bucketKeyIsString), unsure(bucketKeyIsNotString));

            m_out.appendTo(bucketKeyIsString, bucketKeyIsNotString);
            m_out.branch(isString(key, provenType(m_node->child2())),
                unsure(slowPath), unsure(loopAround));

            m_out.appendTo(bucketKeyIsNotString, bucketKeyIsHeapBigInt);
            m_out.branch(isHeapBigInt(bucketKey),
                unsure(bucketKeyIsHeapBigInt), unsure(loopAround));

            m_out.appendTo(bucketKeyIsHeapBigInt, loopAround);
            m_out.branch(isHeapBigInt(key, provenType(m_node->child2())),
                unsure(slowPath), unsure(loopAround));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        // Advance the probe: feed index + 1 back into the loop phi.
        m_out.appendTo(loopAround, slowPath);
        m_out.addIncomingToPhi(unmaskedIndex, m_out.anchor(m_out.add(index, m_out.int32One)));
        m_out.jump(loopStart);

        m_out.appendTo(slowPath, notPresentInTable);
        ValueFromBlock slowPathResult = m_out.anchor(vmCall(pointerType(),
            m_node->child1().useKind() == MapObjectUse ? operationJSMapFindBucket : operationJSSetFindBucket, weakPointer(globalObject), map, key, hash));
        m_out.jump(continuation);

        // Not found: return the VM's shared sentinel bucket for the container kind.
        m_out.appendTo(notPresentInTable, continuation);
        ValueFromBlock notPresentResult;
        if (m_node->child1().useKind() == MapObjectUse)
            notPresentResult = m_out.anchor(weakPointer(vm().sentinelMapBucket()));
        else if (m_node->child1().useKind() == SetObjectUse)
            notPresentResult = m_out.anchor(weakPointer(vm().sentinelSetBucket()));
        else
            RELEASE_ASSERT_NOT_REACHED();
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), bucketResult, slowPathResult, notPresentResult));
    }
12100
12101 void compileGetMapBucketHead()
12102 {
12103 LValue map;
12104 if (m_node->child1().useKind() == MapObjectUse)
12105 map = lowMapObject(m_node->child1());
12106 else if (m_node->child1().useKind() == SetObjectUse)
12107 map = lowSetObject(m_node->child1());
12108 else
12109 RELEASE_ASSERT_NOT_REACHED();
12110
12111 ASSERT(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::offsetOfHead() == HashMapImpl<HashMapBucket<HashMapBucketDataKeyValue>>::offsetOfHead());
12112 setJSValue(m_out.loadPtr(map, m_heaps.HashMapImpl_head));
12113 }
12114
    // GetMapBucketNext: walks the bucket chain from the given bucket's `next`
    // pointer, skipping buckets whose key slot is zero (cleared entries), and
    // returns either the next live bucket or the VM's sentinel bucket when the
    // chain ends.
    void compileGetMapBucketNext()
    {
        LBasicBlock loopStart = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock noBucket = m_out.newBlock();
        LBasicBlock hasBucket = m_out.newBlock();
        LBasicBlock nextBucket = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopStart);

        // The next/key offsets agree between key-only and key-value bucket
        // layouts, so the same loads work for Map and Set buckets.
        ASSERT(HashMapBucket<HashMapBucketDataKey>::offsetOfNext() == HashMapBucket<HashMapBucketDataKeyValue>::offsetOfNext());
        ASSERT(HashMapBucket<HashMapBucketDataKey>::offsetOfKey() == HashMapBucket<HashMapBucketDataKeyValue>::offsetOfKey());
        LValue mapBucketPrev = lowCell(m_node->child1());
        ValueFromBlock mapBucketStart = m_out.anchor(m_out.loadPtr(mapBucketPrev, m_heaps.HashMapBucket_next));
        m_out.jump(loopStart);

        m_out.appendTo(loopStart, noBucket);
        LValue mapBucket = m_out.phi(pointerType(), mapBucketStart);
        m_out.branch(m_out.isNull(mapBucket), unsure(noBucket), unsure(hasBucket));

        // End of chain: return the sentinel for the owner's container kind.
        m_out.appendTo(noBucket, hasBucket);
        ValueFromBlock noBucketResult;
        if (m_node->bucketOwnerType() == BucketOwnerType::Map)
            noBucketResult = m_out.anchor(weakPointer(vm().sentinelMapBucket()));
        else {
            ASSERT(m_node->bucketOwnerType() == BucketOwnerType::Set);
            noBucketResult = m_out.anchor(weakPointer(vm().sentinelSetBucket()));
        }
        m_out.jump(continuation);

        // Live bucket check: a zero key slot marks a cleared bucket — skip it.
        m_out.appendTo(hasBucket, nextBucket);
        ValueFromBlock bucketResult = m_out.anchor(mapBucket);
        m_out.branch(m_out.isZero64(m_out.load64(mapBucket, m_heaps.HashMapBucket_key)), unsure(nextBucket), unsure(continuation));

        m_out.appendTo(nextBucket, continuation);
        m_out.addIncomingToPhi(mapBucket, m_out.anchor(m_out.loadPtr(mapBucket, m_heaps.HashMapBucket_next)));
        m_out.jump(loopStart);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), noBucketResult, bucketResult));
    }
12156
12157 void compileLoadValueFromMapBucket()
12158 {
12159 LValue mapBucket = lowCell(m_node->child1());
12160 setJSValue(m_out.load64(mapBucket, m_heaps.HashMapBucket_value));
12161 }
12162
12163 void compileExtractValueFromWeakMapGet()
12164 {
12165 LValue value = lowJSValue(m_node->child1());
12166 setJSValue(m_out.select(m_out.isZero64(value),
12167 m_out.constInt64(JSValue::encode(jsUndefined())),
12168 value));
12169 }
12170
12171 void compileLoadKeyFromMapBucket()
12172 {
12173 LValue mapBucket = lowCell(m_node->child1());
12174 setJSValue(m_out.load64(mapBucket, m_heaps.HashMapBucket_key));
12175 }
12176
12177 void compileSetAdd()
12178 {
12179 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
12180 LValue set = lowSetObject(m_node->child1());
12181 LValue key = lowJSValue(m_node->child2());
12182 LValue hash = lowInt32(m_node->child3());
12183
12184 setJSValue(vmCall(pointerType(), operationSetAdd, weakPointer(globalObject), set, key, hash));
12185 }
12186
12187 void compileMapSet()
12188 {
12189 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
12190 LValue map = lowMapObject(m_graph.varArgChild(m_node, 0));
12191 LValue key = lowJSValue(m_graph.varArgChild(m_node, 1));
12192 LValue value = lowJSValue(m_graph.varArgChild(m_node, 2));
12193 LValue hash = lowInt32(m_graph.varArgChild(m_node, 3));
12194
12195 setJSValue(vmCall(pointerType(), operationMapSet, weakPointer(globalObject), map, key, value, hash));
12196 }
12197
    // WeakMapGet: inline open-addressed probe of a WeakMap/WeakSet buffer.
    // Children: (weak map/set, object key, precomputed hash). Keys compare by
    // pointer identity only, so the probe either hits the exact key or stops at
    // a null key slot (empty bucket => no entry). Returns the bucket's value
    // for WeakMap, or the matched key for WeakSet; a miss yields the null key
    // path, producing a zero encoding that ExtractValueFromWeakMapGet maps to
    // undefined.
    void compileWeakMapGet()
    {
        LBasicBlock loopStart = m_out.newBlock();
        LBasicBlock loopAround = m_out.newBlock();
        LBasicBlock notEqualValue = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopStart);

        LValue weakMap;
        if (m_node->child1().useKind() == WeakMapObjectUse)
            weakMap = lowWeakMapObject(m_node->child1());
        else if (m_node->child1().useKind() == WeakSetObjectUse)
            weakMap = lowWeakSetObject(m_node->child1());
        else
            RELEASE_ASSERT_NOT_REACHED();
        LValue key = lowObject(m_node->child2());
        LValue hash = lowInt32(m_node->child3());

        LValue buffer = m_out.loadPtr(weakMap, m_heaps.WeakMapImpl_buffer);
        // Capacity is assumed power-of-two so capacity - 1 is the index mask —
        // TODO(review): confirm WeakMapImpl guarantees this.
        LValue mask = m_out.sub(m_out.load32(weakMap, m_heaps.WeakMapImpl_capacity), m_out.int32One);

        ValueFromBlock indexStart = m_out.anchor(hash);
        m_out.jump(loopStart);

        m_out.appendTo(loopStart, notEqualValue);
        LValue unmaskedIndex = m_out.phi(Int32, indexStart);
        LValue index = m_out.bitAnd(mask, unmaskedIndex);

        LValue bucket;

        // Bucket address = buffer + index * sizeof(bucket); the bucket size
        // must be a power of two so the multiply can be a shift.
        if (m_node->child1().useKind() == WeakMapObjectUse) {
            static_assert(hasOneBitSet(sizeof(WeakMapBucket<WeakMapBucketDataKeyValue>)), "Should be a power of 2");
            bucket = m_out.add(buffer, m_out.shl(m_out.zeroExt(index, Int64), m_out.constInt32(getLSBSet(sizeof(WeakMapBucket<WeakMapBucketDataKeyValue>)))));
        } else {
            static_assert(hasOneBitSet(sizeof(WeakMapBucket<WeakMapBucketDataKey>)), "Should be a power of 2");
            bucket = m_out.add(buffer, m_out.shl(m_out.zeroExt(index, Int64), m_out.constInt32(getLSBSet(sizeof(WeakMapBucket<WeakMapBucketDataKey>)))));
        }

        LValue bucketKey = m_out.load64(bucket, m_heaps.WeakMapBucket_key);
        // Pointer-identity hit ends the probe.
        m_out.branch(m_out.equal(key, bucketKey), unsure(continuation), unsure(notEqualValue));

        // Null key slot means empty bucket: the key is not present.
        m_out.appendTo(notEqualValue, loopAround);
        m_out.branch(m_out.isNull(bucketKey), unsure(continuation), unsure(loopAround));

        // Otherwise advance linearly.
        m_out.appendTo(loopAround, continuation);
        m_out.addIncomingToPhi(unmaskedIndex, m_out.anchor(m_out.add(index, m_out.int32One)));
        m_out.jump(loopStart);

        m_out.appendTo(continuation, lastNext);
        LValue result;
        if (m_node->child1().useKind() == WeakMapObjectUse)
            result = m_out.load64(bucket, m_heaps.WeakMapBucket_value);
        else
            result = bucketKey;
        setJSValue(result);
    }
12255
12256 void compileWeakSetAdd()
12257 {
12258 LValue set = lowWeakSetObject(m_node->child1());
12259 LValue key = lowObject(m_node->child2());
12260 LValue hash = lowInt32(m_node->child3());
12261
12262 vmCall(Void, operationWeakSetAdd, m_vmValue, set, key, hash);
12263 }
12264
12265 void compileWeakMapSet()
12266 {
12267 LValue map = lowWeakMapObject(m_graph.varArgChild(m_node, 0));
12268 LValue key = lowObject(m_graph.varArgChild(m_node, 1));
12269 LValue value = lowJSValue(m_graph.varArgChild(m_node, 2));
12270 LValue hash = lowInt32(m_graph.varArgChild(m_node, 3));
12271
12272 vmCall(Void, operationWeakMapSet, m_vmValue, map, key, value, hash);
12273 }
12274
    // TypeOfIsObject: lowers (typeof v === "object").
    //   - functions => false (typeof is "function");
    //   - non-function objects => true, unless exotic-for-typeof (e.g. masquerading
    //     objects), which take a C slow path;
    //   - non-object cells => false;
    //   - non-cells => true only for null.
    void compileTypeOfIsObject()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

        Edge child = m_node->child1();
        LValue value = lowJSValue(child);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock notFunctionCase = m_out.newBlock();
        LBasicBlock objectCase = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock notCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(child)), unsure(cellCase), unsure(notCellCase));

        // Functions report typeof "function", so the answer is false.
        LBasicBlock lastNext = m_out.appendTo(cellCase, notFunctionCase);
        ValueFromBlock isFunctionResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isFunction(value, provenType(child)),
            unsure(continuation), unsure(notFunctionCase));

        // Non-object cells (e.g. strings, symbols) are false.
        m_out.appendTo(notFunctionCase, objectCase);
        ValueFromBlock notObjectResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isObject(value, provenType(child)),
            unsure(objectCase), unsure(continuation));

        // Plain objects are true; exotic-for-typeof objects need the slow call.
        m_out.appendTo(objectCase, slowPath);
        ValueFromBlock objectResult = m_out.anchor(m_out.booleanTrue);
        m_out.branch(
            isExoticForTypeof(value, provenType(child)),
            rarely(slowPath), usually(continuation));

        // Slow path: lazily-generated call to operationTypeOfIsObject.
        m_out.appendTo(slowPath, notCellCase);
        VM& vm = this->vm();
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationTypeOfIsObject, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
            }, value);
        ValueFromBlock slowResult = m_out.anchor(m_out.notZero64(slowResultValue));
        m_out.jump(continuation);

        // Non-cell: only null has typeof "object".
        m_out.appendTo(notCellCase, continuation);
        LValue notCellResultValue = m_out.equal(value, m_out.constInt64(JSValue::encode(jsNull())));
        ValueFromBlock notCellResult = m_out.anchor(notCellResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(
            Int32,
            isFunctionResult, notObjectResult, objectResult, slowResult, notCellResult);
        setBoolean(result);
    }
12331
12332 void compileIsCallable(S_JITOperation_GC slowPathOperation)
12333 {
12334 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
12335
12336 Edge child = m_node->child1();
12337 LValue value = lowJSValue(child);
12338
12339 LBasicBlock cellCase = m_out.newBlock();
12340 LBasicBlock notFunctionCase = m_out.newBlock();
12341 LBasicBlock slowPath = m_out.newBlock();
12342 LBasicBlock continuation = m_out.newBlock();
12343
12344 ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
12345 m_out.branch(
12346 isCell(value, provenType(child)), unsure(cellCase), unsure(continuation));
12347
12348 LBasicBlock lastNext = m_out.appendTo(cellCase, notFunctionCase);
12349 ValueFromBlock functionResult = m_out.anchor(m_out.booleanTrue);
12350 m_out.branch(
12351 isFunction(value, provenType(child)),
12352 unsure(continuation), unsure(notFunctionCase));
12353
12354 m_out.appendTo(notFunctionCase, slowPath);
12355 ValueFromBlock objectResult = m_out.anchor(m_out.booleanFalse);
12356 m_out.branch(
12357 isExoticForTypeof(value, provenType(child)),
12358 rarely(slowPath), usually(continuation));
12359
12360 m_out.appendTo(slowPath, continuation);
12361 VM& vm = this->vm();
12362 LValue slowResultValue = lazySlowPath(
12363 [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
12364 return createLazyCallGenerator(vm,
12365 slowPathOperation, locations[0].directGPR(),
12366 CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
12367 }, value);
12368 ValueFromBlock slowResult = m_out.anchor(m_out.notNull(slowResultValue));
12369 m_out.jump(continuation);
12370
12371 m_out.appendTo(continuation, lastNext);
12372 LValue result = m_out.phi(
12373 Int32, notCellResult, functionResult, objectResult, slowResult);
12374 setBoolean(result);
12375 }
12376
12377 void compileIsConstructor()
12378 {
12379 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
12380 LValue value = lowJSValue(m_node->child1());
12381 setBoolean(vmCall(Int32, operationIsConstructor, weakPointer(globalObject), value));
12382 }
12383
    // Lowers IsTypedArrayView to a boolean: false for non-cells, otherwise the
    // result of the isTypedArrayView type check on the cell.
    void compileIsTypedArrayView()
    {
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Non-cells are never typed array views.
        ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
        ValueFromBlock cellResult = m_out.anchor(isTypedArrayView(value, provenType(m_node->child1())));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, notCellResult, cellResult));
    }
12401
    // Lowers TypeOf. buildTypeOf emits the full type-dispatch control flow and
    // invokes the functor once per possible TypeofType; each invocation anchors
    // the corresponding interned type string and jumps to the continuation,
    // where a phi merges all of the anchored results.
    void compileTypeOf()
    {
        Edge child = m_node->child1();
        LValue value = lowJSValue(child);

        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);

        // One anchored result per typeof outcome; merged by the phi below.
        Vector<ValueFromBlock> results;

        buildTypeOf(
            child, value,
            [&] (TypeofType type) {
                results.append(m_out.anchor(weakPointer(vm().smallStrings.typeString(type))));
                m_out.jump(continuation);
            });

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, results));
    }
12422
    // Shared lowering for the "in"-family access nodes (InById, InByVal,
    // HasPrivateName, HasPrivateBrand). Emits a patchpoint that runs an inline
    // cache fast path and, on miss, a late slow path that calls the matching
    // *Optimize runtime operation so the IC can be repatched.
    //
    // base: the lowered base cell.
    // subscriptValue: a CacheableIdentifier for InById, otherwise the lowered
    //     subscript LValue (dispatched on at compile time via `type`).
    template<AccessType type, typename SubscriptKind>
    void compileInBy(LValue base, SubscriptKind subscriptValue)
    {
        static_assert(type == AccessType::InById || type == AccessType::InByVal || type == AccessType::HasPrivateName || type == AccessType::HasPrivateBrand);
        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        // InById carries its subscript as an immediate identifier, not a register.
        if constexpr (type != AccessType::InById)
            patchpoint->appendSomeRegister(subscriptValue);
        patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
        patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        // With data ICs we need one scratch GPR to hold the StructureStubInfo pointer.
        patchpoint->numGPScratchRegisters = JITCode::useDataIC(JITType::FTLJIT) ? 1 : 0;

        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);

        // Captured by copy into the generator lambda, which outlives this frame.
        State* state = &m_ftlState;
        Node* node = m_node;
        CodeOrigin semanticNodeOrigin = node->origin.semantic;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex = state->jitCode->common.codeOrigins->addUniqueCallSiteIndex(semanticNodeOrigin);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions = exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                GPRReg stubInfoGPR = JITCode::useDataIC(JITType::FTLJIT) ? params.gpScratch(0) : InvalidGPRReg;
                auto returnGPR = params[0].gpr();
                auto base = JSValueRegs(params[1].gpr());

                // Select the repatching slow-path operation for this access kind.
                constexpr auto optimizationFunction = [&] () {
                    if constexpr (type == AccessType::InById)
                        return operationInByIdOptimize;
                    else if constexpr (type == AccessType::InByVal)
                        return operationInByValOptimize;
                    else if constexpr (type == AccessType::HasPrivateName)
                        return operationHasPrivateNameOptimize;
                    else {
                        static_assert(type == AccessType::HasPrivateBrand);
                        return operationHasPrivateBrandOptimize;
                    }
                }();

                // InById passes the identifier as an immediate; all other kinds
                // read the subscript out of the second patchpoint register.
                const auto subscript = [&] {
                    if constexpr (type == AccessType::InById)
                        return CCallHelpers::TrustedImmPtr(subscriptValue.rawBits());
                    else
                        return JSValueRegs(params[2].gpr());
                }();

                const auto generator = [&] {
                    if constexpr (type == AccessType::InById) {
                        return Box<JITInByIdGenerator>::create(
                            jit.codeBlock(), JITType::FTLJIT, semanticNodeOrigin, callSiteIndex,
                            params.unavailableRegisters(), subscriptValue, base,
                            JSValueRegs(returnGPR), stubInfoGPR);
                    } else {
                        return Box<JITInByValGenerator>::create(
                            jit.codeBlock(), JITType::FTLJIT, semanticNodeOrigin, callSiteIndex,
                            type, params.unavailableRegisters(), base, subscript,
                            JSValueRegs(returnGPR), stubInfoGPR);
                    }
                }();

                CCallHelpers::JumpList slowCases;
                generator->generateFastPath(jit);
                // NOTE(review): for the non-InById kinds the slow-path jump is
                // only collected when data ICs are off — presumably the data-IC
                // fast path jumps to the slow path internally; confirm against
                // JITInByValGenerator before changing.
                if constexpr (type == AccessType::InById)
                    slowCases.append(generator->slowPathJump());
                else {
                    if (!JITCode::useDataIC(JITType::FTLJIT))
                        slowCases.append(generator->slowPathJump());
                }
                CCallHelpers::Label done = jit.label();

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        slowCases.link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall;
                        // The InByVal operation signature takes one extra
                        // (null here) pointer argument, hence the split.
                        if constexpr (type != AccessType::InByVal) {
                            if (JITCode::useDataIC(JITType::FTLJIT)) {
                                jit.move(CCallHelpers::TrustedImmPtr(generator->stubInfo()), stubInfoGPR);
                                generator->stubInfo()->m_slowOperation = optimizationFunction;
                                slowPathCall = callOperation(
                                    *state, params.unavailableRegisters(), jit, semanticNodeOrigin,
                                    exceptions.get(), CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), returnGPR,
                                    jit.codeBlock()->globalObjectFor(semanticNodeOrigin),
                                    stubInfoGPR, base, subscript).call();
                            } else {
                                slowPathCall = callOperation(
                                    *state, params.unavailableRegisters(), jit, semanticNodeOrigin,
                                    exceptions.get(), optimizationFunction, returnGPR,
                                    jit.codeBlock()->globalObjectFor(semanticNodeOrigin),
                                    CCallHelpers::TrustedImmPtr(generator->stubInfo()), base, subscript).call();
                            }
                        } else {
                            if (JITCode::useDataIC(JITType::FTLJIT)) {
                                jit.move(CCallHelpers::TrustedImmPtr(generator->stubInfo()), stubInfoGPR);
                                generator->stubInfo()->m_slowOperation = optimizationFunction;
                                slowPathCall = callOperation(
                                    *state, params.unavailableRegisters(), jit, semanticNodeOrigin,
                                    exceptions.get(), CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), returnGPR,
                                    jit.codeBlock()->globalObjectFor(semanticNodeOrigin),
                                    stubInfoGPR,
                                    CCallHelpers::TrustedImmPtr(nullptr), base, subscript).call();
                            } else {
                                slowPathCall = callOperation(
                                    *state, params.unavailableRegisters(), jit, semanticNodeOrigin,
                                    exceptions.get(), optimizationFunction, returnGPR,
                                    jit.codeBlock()->globalObjectFor(semanticNodeOrigin),
                                    CCallHelpers::TrustedImmPtr(generator->stubInfo()),
                                    CCallHelpers::TrustedImmPtr(nullptr), base, subscript).call();
                            }
                        }
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });

        // The patchpoint's Int64 result is the (boxed) answer.
        setJSValue(patchpoint);
    }
12553
12554 void compileInById()
12555 {
12556 compileInBy<AccessType::InById>(lowCell(m_node->child1()), m_node->cacheableIdentifier());
12557 }
12558
12559 void compileInByVal()
12560 {
12561 compileInBy<AccessType::InByVal>(lowCell(m_node->child1()), lowJSValue(m_node->child2()));
12562 }
12563
12564 void compileHasPrivateName()
12565 {
12566 compileInBy<AccessType::HasPrivateName>(lowCell(m_node->child1()), lowSymbol(m_node->child2()));
12567 }
12568
12569 void compileHasPrivateBrand()
12570 {
12571 compileInBy<AccessType::HasPrivateBrand>(lowCell(m_node->child1()), lowSymbol(m_node->child2()));
12572 }
12573
    // Lowers HasOwnProperty. Fast path: compute (structureID + key hash), index
    // into the VM-wide HasOwnPropertyCache, and use the cached boolean when the
    // entry matches both the structure and the uniqued string impl. Keys that
    // are rope strings, non-atomic strings, or (for UntypedUse) not
    // string/symbol cells go to the slow path, as do cache misses.
    void compileHasOwnProperty()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock lastNext = nullptr;

        LValue object = lowObject(m_node->child1());
        LValue uniquedStringImpl;
        LValue keyAsValue = nullptr;
        // Each case must produce uniquedStringImpl (a pointer to the key's
        // atomized StringImpl/SymbolImpl) or branch to slowCase.
        switch (m_node->child2().useKind()) {
        case StringUse: {
            LBasicBlock isNonEmptyString = m_out.newBlock();
            LBasicBlock isAtomString = m_out.newBlock();

            keyAsValue = lowString(m_node->child2());
            // Rope strings have no resolved impl; take the slow path.
            m_out.branch(isNotRopeString(keyAsValue, m_node->child2()), usually(isNonEmptyString), rarely(slowCase));

            lastNext = m_out.appendTo(isNonEmptyString, isAtomString);
            uniquedStringImpl = m_out.loadPtr(keyAsValue, m_heaps.JSString_value);
            // Only atomized impls are valid cache keys.
            LValue isNotAtomic = m_out.testIsZero32(m_out.load32(uniquedStringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::flagIsAtom()));
            m_out.branch(isNotAtomic, rarely(slowCase), usually(isAtomString));

            m_out.appendTo(isAtomString, slowCase);
            break;
        }
        case SymbolUse: {
            // Symbols always carry a uniqued impl; no extra checks needed.
            keyAsValue = lowSymbol(m_node->child2());
            uniquedStringImpl = m_out.loadPtr(keyAsValue, m_heaps.Symbol_symbolImpl);
            lastNext = m_out.insertNewBlocksBefore(slowCase);
            break;
        }
        case UntypedUse: {
            LBasicBlock isCellCase = m_out.newBlock();
            LBasicBlock isStringCase = m_out.newBlock();
            LBasicBlock notStringCase = m_out.newBlock();
            LBasicBlock isNonEmptyString = m_out.newBlock();
            LBasicBlock isSymbolCase = m_out.newBlock();
            LBasicBlock hasUniquedStringImpl = m_out.newBlock();

            keyAsValue = lowJSValue(m_node->child2());
            m_out.branch(isCell(keyAsValue), usually(isCellCase), rarely(slowCase));

            lastNext = m_out.appendTo(isCellCase, isStringCase);
            m_out.branch(isString(keyAsValue), unsure(isStringCase), unsure(notStringCase));

            // String key: must be non-rope and atomized, as in StringUse above.
            m_out.appendTo(isStringCase, isNonEmptyString);
            m_out.branch(isNotRopeString(keyAsValue, m_node->child2()), usually(isNonEmptyString), rarely(slowCase));

            m_out.appendTo(isNonEmptyString, notStringCase);
            LValue implFromString = m_out.loadPtr(keyAsValue, m_heaps.JSString_value);
            ValueFromBlock stringResult = m_out.anchor(implFromString);
            LValue isNotAtomic = m_out.testIsZero32(m_out.load32(implFromString, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::flagIsAtom()));
            m_out.branch(isNotAtomic, rarely(slowCase), usually(hasUniquedStringImpl));

            m_out.appendTo(notStringCase, isSymbolCase);
            m_out.branch(isSymbol(keyAsValue), unsure(isSymbolCase), unsure(slowCase));

            m_out.appendTo(isSymbolCase, hasUniquedStringImpl);
            ValueFromBlock symbolResult = m_out.anchor(m_out.loadPtr(keyAsValue, m_heaps.Symbol_symbolImpl));
            m_out.jump(hasUniquedStringImpl);

            m_out.appendTo(hasUniquedStringImpl, slowCase);
            uniquedStringImpl = m_out.phi(pointerType(), stringResult, symbolResult);
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        ASSERT(keyAsValue);

        // Note that we don't test if the hash is zero here. AtomStringImpl's can't have a zero
        // hash, however, a SymbolImpl may. But, because this is a cache, we don't care. We only
        // ever load the result from the cache if the cache entry matches what we are querying for.
        // So we either get super lucky and use zero for the hash and somehow collide with the entity
        // we're looking for, or we realize we're comparing against another entity, and go to the
        // slow path anyways.
        LValue hash = m_out.lShr(m_out.load32(uniquedStringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));

        // Cache index = (hash + structureID) masked to the cache size.
        LValue structureID = m_out.load32(object, m_heaps.JSCell_structureID);
        LValue index = m_out.add(hash, structureID);
        index = m_out.zeroExtPtr(m_out.bitAnd(index, m_out.constInt32(HasOwnPropertyCache::mask)));
        ASSERT(vm().hasOwnPropertyCache());
        LValue cache = m_out.constIntPtr(vm().hasOwnPropertyCache());

        IndexedAbstractHeap& heap = m_heaps.HasOwnPropertyCache;
        // A hit requires both the structure and the impl pointer to match.
        LValue sameStructureID = m_out.equal(structureID, m_out.load32(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfStructureID())));
        LValue sameImpl = m_out.equal(uniquedStringImpl, m_out.loadPtr(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfImpl())));
        ValueFromBlock fastResult = m_out.anchor(m_out.load8ZeroExt32(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfResult())));
        LValue cacheHit = m_out.bitAnd(sameStructureID, sameImpl);

        m_out.branch(m_out.notZero32(cacheHit), usually(continuation), rarely(slowCase));

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult;
        slowResult = m_out.anchor(m_out.notZero64(vmCall(Int64, operationHasOwnProperty, weakPointer(globalObject), object, keyAsValue)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, fastResult, slowResult));
    }
12676
12677 void compileParseInt()
12678 {
12679 RELEASE_ASSERT(m_node->child1().useKind() == UntypedUse || m_node->child1().useKind() == StringUse);
12680 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
12681 LValue result;
12682 if (m_node->child2()) {
12683 LValue radix = lowInt32(m_node->child2());
12684 if (m_node->child1().useKind() == UntypedUse)
12685 result = vmCall(Int64, operationParseIntGeneric, weakPointer(globalObject), lowJSValue(m_node->child1()), radix);
12686 else
12687 result = vmCall(Int64, operationParseIntString, weakPointer(globalObject), lowString(m_node->child1()), radix);
12688 } else {
12689 if (m_node->child1().useKind() == UntypedUse)
12690 result = vmCall(Int64, operationParseIntNoRadixGeneric, weakPointer(globalObject), lowJSValue(m_node->child1()));
12691 else
12692 result = vmCall(Int64, operationParseIntStringNoRadix, weakPointer(globalObject), lowString(m_node->child1()));
12693 }
12694 setJSValue(result);
12695 }
12696
    // Lowers OverridesHasInstance to a boolean: true when the hasInstance value
    // is not the known default hasInstance function, or when the constructor's
    // type-info flags lack ImplementsDefaultHasInstance.
    void compileOverridesHasInstance()
    {
        FrozenValue* defaultHasInstanceFunction = m_node->cellOperand();
        ASSERT(defaultHasInstanceFunction->cell()->inherits<JSFunction>(vm()));

        LValue constructor = lowCell(m_node->child1());
        LValue hasInstance = lowJSValue(m_node->child2());

        LBasicBlock defaultHasInstance = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Unlike in the DFG, we don't worry about cleaning this code up for the case where we have proven the hasInstanceValue is a constant as B3 should fix it for us.

        // Any non-default hasInstance value counts as an override.
        ValueFromBlock notDefaultHasInstanceResult = m_out.anchor(m_out.booleanTrue);
        m_out.branch(m_out.notEqual(hasInstance, frozenPointer(defaultHasInstanceFunction)), unsure(continuation), unsure(defaultHasInstance));

        // Default hasInstance: overridden iff the flag bit is clear.
        LBasicBlock lastNext = m_out.appendTo(defaultHasInstance, continuation);
        ValueFromBlock implementsDefaultHasInstanceResult = m_out.anchor(m_out.testIsZero32(
            m_out.load8ZeroExt32(constructor, m_heaps.JSCell_typeInfoFlags),
            m_out.constInt32(ImplementsDefaultHasInstance)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, implementsDefaultHasInstanceResult, notDefaultHasInstanceResult));
    }
12722
12723 void compileCheckTypeInfoFlags()
12724 {
12725 speculate(
12726 BadTypeInfoFlags, noValue(), nullptr,
12727 m_out.testIsZero32(
12728 m_out.load8ZeroExt32(lowCell(m_node->child1()), m_heaps.JSCell_typeInfoFlags),
12729 m_out.constInt32(m_node->typeInfoOperand())));
12730 }
12731
    // Lowers InstanceOf. Emits a patchpoint running a JITInstanceOfGenerator
    // inline cache, with a late slow path calling operationInstanceOfOptimize
    // so the IC can be repatched. Non-cell values short-circuit to false; a
    // non-cell prototype always takes the slow path. The patchpoint result is
    // a boxed boolean.
    void compileInstanceOf()
    {
        Node* node = m_node;
        State* state = &m_ftlState;

        LValue value;
        LValue prototype;
        bool valueIsCell;
        bool prototypeIsCell;
        if (m_node->child1().useKind() == CellUse
            && m_node->child2().useKind() == CellUse) {
            value = lowCell(m_node->child1());
            prototype = lowCell(m_node->child2());

            valueIsCell = true;
            prototypeIsCell = true;
        } else {
            DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
            DFG_ASSERT(m_graph, m_node, m_node->child2().useKind() == UntypedUse);

            value = lowJSValue(m_node->child1());
            prototype = lowJSValue(m_node->child2());

            // Use abstract-interpreter facts to skip cell checks when proven.
            valueIsCell = abstractValue(m_node->child1()).isType(SpecCell);
            prototypeIsCell = abstractValue(m_node->child2()).isType(SpecCell);
        }

        bool prototypeIsObject = abstractValue(m_node->child2()).isType(SpecObject | ~SpecCell);

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(value);
        patchpoint->appendSomeRegister(prototype);
        patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
        patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
        // Two scratch GPRs for the generator, plus one for the stub-info
        // pointer when data ICs are in use.
        patchpoint->numGPScratchRegisters = JITCode::useDataIC(JITType::FTLJIT) ? 3 : 2;
        patchpoint->resultConstraints = { ValueRep::SomeEarlyRegister };
        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        CodeOrigin semanticNodeOrigin = node->origin.semantic;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                GPRReg resultGPR = params[0].gpr();
                GPRReg valueGPR = params[1].gpr();
                GPRReg prototypeGPR = params[2].gpr();
                GPRReg scratchGPR = params.gpScratch(0);
                GPRReg scratch2GPR = params.gpScratch(1);
                GPRReg stubInfoGPR = JITCode::useDataIC(JITType::FTLJIT) ? params.gpScratch(2) : InvalidGPRReg;

                // Non-cell values are never an instance of anything.
                CCallHelpers::Jump doneJump;
                if (!valueIsCell) {
                    CCallHelpers::Jump isCell = jit.branchIfCell(valueGPR);
                    jit.boxBooleanPayload(false, resultGPR);
                    doneJump = jit.jump();
                    isCell.link(&jit);
                }

                // A non-cell prototype can't be handled by the IC fast path.
                CCallHelpers::JumpList slowCases;
                if (!prototypeIsCell)
                    slowCases.append(jit.branchIfNotCell(prototypeGPR));

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.codeOrigins->addUniqueCallSiteIndex(semanticNodeOrigin);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                auto generator = Box<JITInstanceOfGenerator>::create(
                    jit.codeBlock(), JITType::FTLJIT, semanticNodeOrigin, callSiteIndex,
                    params.unavailableRegisters(), resultGPR, valueGPR, prototypeGPR, stubInfoGPR, scratchGPR,
                    scratch2GPR, prototypeIsObject);
                generator->generateFastPath(jit);
                // NOTE(review): with data ICs the generator's slow-path jump is
                // not collected here — presumably it is wired internally;
                // confirm against JITInstanceOfGenerator before changing.
                if (!JITCode::useDataIC(JITType::FTLJIT))
                    slowCases.append(generator->slowPathJump());
                CCallHelpers::Label done = jit.label();

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        J_JITOperation_GSsiJJ optimizationFunction = operationInstanceOfOptimize;

                        slowCases.link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall;
                        if (JITCode::useDataIC(JITType::FTLJIT)) {
                            jit.move(CCallHelpers::TrustedImmPtr(generator->stubInfo()), stubInfoGPR);
                            generator->stubInfo()->m_slowOperation = optimizationFunction;
                            slowPathCall = callOperation(
                                *state, params.unavailableRegisters(), jit, semanticNodeOrigin,
                                exceptions.get(), CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), resultGPR,
                                jit.codeBlock()->globalObjectFor(semanticNodeOrigin),
                                stubInfoGPR, valueGPR,
                                prototypeGPR).call();
                        } else {
                            slowPathCall = callOperation(
                                *state, params.unavailableRegisters(), jit, semanticNodeOrigin,
                                exceptions.get(), optimizationFunction, resultGPR,
                                jit.codeBlock()->globalObjectFor(semanticNodeOrigin),
                                CCallHelpers::TrustedImmPtr(generator->stubInfo()), valueGPR,
                                prototypeGPR).call();
                        }
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });

                if (doneJump.isSet())
                    doneJump.link(&jit);
            });

        // This returns a boxed boolean.
        setJSValue(patchpoint);
    }
12856
12857 void compileInstanceOfCustom()
12858 {
12859 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
12860 LValue value = lowJSValue(m_node->child1());
12861 LValue constructor = lowCell(m_node->child2());
12862 LValue hasInstance = lowJSValue(m_node->child3());
12863
12864 setBoolean(m_out.notZero64(vmCall(Int64, operationInstanceOfCustom, weakPointer(globalObject), value, constructor, hasInstance)));
12865 }
12866
12867 void compileCountExecution()
12868 {
12869 TypedPointer counter = m_out.absolute(m_node->executionCounter()->address());
12870 m_out.store64(m_out.add(m_out.load64(counter), m_out.constInt64(1)), counter);
12871 }
12872
12873 void compileSuperSamplerBegin()
12874 {
12875 TypedPointer counter = m_out.absolute(bitwise_cast<void*>(&g_superSamplerCount));
12876 m_out.store32(m_out.add(m_out.load32(counter), m_out.constInt32(1)), counter);
12877 }
12878
12879 void compileSuperSamplerEnd()
12880 {
12881 TypedPointer counter = m_out.absolute(bitwise_cast<void*>(&g_superSamplerCount));
12882 m_out.store32(m_out.sub(m_out.load32(counter), m_out.constInt32(1)), counter);
12883 }
12884
12885 void compileStoreBarrier()
12886 {
12887 emitStoreBarrier(lowCell(m_node->child1()), m_node->op() == FencedStoreBarrier);
12888 }
12889
12890 void compileHasIndexedProperty(S_JITOperation_GCZ slowPathOperation)
12891 {
12892 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
12893 LValue base = lowCell(m_graph.varArgChild(m_node, 0));
12894 LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
12895 ArrayMode mode = m_node->arrayMode();
12896
12897 switch (m_node->arrayMode().type()) {
12898 case Array::Int32:
12899 case Array::Contiguous: {
12900 LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
12901
12902 IndexedAbstractHeap& heap = mode.type() == Array::Int32 ?
12903 m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;
12904
12905 LBasicBlock slowCase = m_out.newBlock();
12906 LBasicBlock continuation = m_out.newBlock();
12907 LBasicBlock lastNext = nullptr;
12908
12909 if (!mode.isInBounds()) {
12910 LBasicBlock checkHole = m_out.newBlock();
12911 m_out.branch(
12912 m_out.aboveOrEqual(
12913 index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
12914 rarely(slowCase), usually(checkHole));
12915 lastNext = m_out.appendTo(checkHole, slowCase);
12916 } else
12917 lastNext = m_out.insertNewBlocksBefore(slowCase);
12918
12919 LValue checkHoleResultValue =
12920 m_out.notZero64(m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1))));
12921 ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
12922 if (mode.isInBoundsSaneChain())
12923 m_out.jump(continuation);
12924 else if (!mode.isInBounds())
12925 m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));
12926 else
12927 speculateAndJump(continuation, LoadFromHole, noValue(), nullptr, checkHoleResultValue);
12928
12929 m_out.appendTo(slowCase, continuation);
12930 ValueFromBlock slowResult = m_out.anchor(
12931 m_out.notZero64(vmCall(Int64, slowPathOperation, weakPointer(globalObject), base, index)));
12932 m_out.jump(continuation);
12933
12934 m_out.appendTo(continuation, lastNext);
12935 setBoolean(m_out.phi(Int32, checkHoleResult, slowResult));
12936 return;
12937 }
12938 case Array::Double: {
12939 LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
12940
12941 IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;
12942
12943 LBasicBlock slowCase = m_out.newBlock();
12944 LBasicBlock continuation = m_out.newBlock();
12945 LBasicBlock lastNext = nullptr;
12946
12947 if (!m_node->arrayMode().isInBounds()) {
12948 LBasicBlock checkHole = m_out.newBlock();
12949 m_out.branch(
12950 m_out.aboveOrEqual(
12951 index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
12952 rarely(slowCase), usually(checkHole));
12953 lastNext = m_out.appendTo(checkHole, slowCase);
12954 } else
12955 lastNext = m_out.insertNewBlocksBefore(slowCase);
12956
12957 LValue doubleValue = m_out.loadDouble(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
12958 LValue checkHoleResultValue = m_out.doubleEqual(doubleValue, doubleValue);
12959 ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
12960 if (mode.isInBoundsSaneChain())
12961 m_out.jump(continuation);
12962 else if (!mode.isInBounds())
12963 m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));
12964 else
12965 speculateAndJump(continuation, LoadFromHole, noValue(), nullptr, checkHoleResultValue);
12966
12967 m_out.appendTo(slowCase, continuation);
12968 ValueFromBlock slowResult = m_out.anchor(
12969 m_out.notZero64(vmCall(Int64, slowPathOperation, weakPointer(globalObject), base, index)));
12970 m_out.jump(continuation);
12971
12972 m_out.appendTo(continuation, lastNext);
12973 setBoolean(m_out.phi(Int32, checkHoleResult, slowResult));
12974 return;
12975 }
12976
12977 case Array::ArrayStorage: {
12978 LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
12979
12980 LBasicBlock slowCase = m_out.newBlock();
12981 LBasicBlock continuation = m_out.newBlock();
12982 LBasicBlock lastNext = nullptr;
12983
12984 if (!m_node->arrayMode().isInBounds()) {
12985 LBasicBlock checkHole = m_out.newBlock();
12986 m_out.branch(
12987 m_out.aboveOrEqual(
12988 index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_vectorLength)),
12989 rarely(slowCase), usually(checkHole));
12990 lastNext = m_out.appendTo(checkHole, slowCase);
12991 } else
12992 lastNext = m_out.insertNewBlocksBefore(slowCase);
12993
12994 LValue checkHoleResultValue =
12995 m_out.notZero64(m_out.load64(baseIndex(m_heaps.ArrayStorage_vector, storage, index, m_graph.varArgChild(m_node, 1))));
12996 ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
12997 if (mode.isInBoundsSaneChain())
12998 m_out.jump(continuation);
12999 else if (!mode.isInBounds())
13000 m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));
13001 else
13002 speculateAndJump(continuation, LoadFromHole, noValue(), nullptr, checkHoleResultValue);
13003
13004 m_out.appendTo(slowCase, continuation);
13005 ValueFromBlock slowResult = m_out.anchor(
13006 m_out.notZero64(vmCall(Int64, slowPathOperation, weakPointer(globalObject), base, index)));
13007 m_out.jump(continuation);
13008
13009 m_out.appendTo(continuation, lastNext);
13010 setBoolean(m_out.phi(Int32, checkHoleResult, slowResult));
13011 break;
13012 }
13013
13014 default: {
13015 setBoolean(m_out.notZero64(vmCall(Int64, slowPathOperation, weakPointer(globalObject), base, index)));
13016 break;
13017 }
13018 }
13019 }
13020
13021 void compileHasEnumerableProperty()
13022 {
13023 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
13024 LValue base = lowJSValue(m_node->child1());
13025 LValue property = lowCell(m_node->child2());
13026 setJSValue(vmCall(Int64, operationHasEnumerableProperty, weakPointer(globalObject), base, property));
13027 }
13028
    // Shared lowering for the Has*StructureProperty / InStructureProperty
    // nodes. Fast path: if the base is a cell whose structure ID matches the
    // enumerator's cached structure ID, the property is known present (true).
    // Non-cells and structure mismatches call slowPathCall and compare its
    // result against the encoded `true` JSValue.
    //
    // base: the lowered base (cell or JSValue depending on the caller).
    // slowPathCall: runtime helper taking (globalObject, base, property).
    template <typename SlowPathCall>
    void compileHasStructurePropertyImpl(LValue base, SlowPathCall slowPathCall)
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        LValue property = lowString(m_node->child2());
        LValue enumerator = lowCell(m_node->child3());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock correctStructure = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Non-cell bases can't match the cached structure; go slow.
        m_out.branch(isCell(base, provenType(m_node->child1())),
            usually(isCellCase), rarely(slowPath));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, correctStructure);

        m_out.branch(m_out.notEqual(
            m_out.load32(base, m_heaps.JSCell_structureID),
            m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedStructureID)),
            rarely(slowPath), usually(correctStructure));

        // Matching structure implies the enumerated property exists.
        m_out.appendTo(correctStructure, slowPath);
        ValueFromBlock correctStructureResult = m_out.anchor(m_out.booleanTrue);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        ValueFromBlock slowPathResult = m_out.anchor(
            m_out.equal(
                m_out.constInt64(JSValue::encode(jsBoolean(true))),
                vmCall(Int64, slowPathCall, weakPointer(globalObject), base, property)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, correctStructureResult, slowPathResult));
    }
13065
13066 void compileHasEnumerableStructureProperty()
13067 {
13068 compileHasStructurePropertyImpl(lowJSValue(m_node->child1()), operationHasEnumerableProperty);
13069 }
13070
13071 void compileHasOwnStructureProperty()
13072 {
13073 compileHasStructurePropertyImpl(lowCell(m_node->child1()), operationHasOwnStructureProperty);
13074 }
13075
13076 void compileInStructureProperty()
13077 {
13078 compileHasStructurePropertyImpl(lowCell(m_node->child1()), operationInStructureProperty);
13079 }
13080
    void compileGetDirectPname()
    {
        // Enumerator-driven property load. If the base still has the
        // enumerator's cached structure, the property can be loaded directly
        // by index from inline or out-of-line storage; otherwise we fall back
        // to a generic operationGetByVal.
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue property = lowCell(m_graph.varArgChild(m_node, 1));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 2));
        LValue enumerator = lowCell(m_graph.varArgChild(m_node, 3));

        LBasicBlock checkOffset = m_out.newBlock();
        LBasicBlock inlineLoad = m_out.newBlock();
        LBasicBlock outOfLineLoad = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // The cached-structure check is what makes the index-based loads below
        // valid: the enumerator's offsets were computed for that structure.
        m_out.branch(m_out.notEqual(
            m_out.load32(base, m_heaps.JSCell_structureID),
            m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedStructureID)),
            rarely(slowCase), usually(checkOffset));

        LBasicBlock lastNext = m_out.appendTo(checkOffset, inlineLoad);
        // Indices below the cached inline capacity live in the object's inline
        // storage; the rest live out-of-line in the butterfly.
        m_out.branch(m_out.aboveOrEqual(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedInlineCapacity)),
            unsure(outOfLineLoad), unsure(inlineLoad));

        m_out.appendTo(inlineLoad, outOfLineLoad);
        ValueFromBlock inlineResult = m_out.anchor(
            m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(),
                base, m_out.zeroExt(index, Int64), ScaleEight, JSObject::offsetOfInlineStorage())));
        m_out.jump(continuation);

        m_out.appendTo(outOfLineLoad, slowCase);
        LValue storage = m_out.loadPtr(base, m_heaps.JSObject_butterfly);
        // Out-of-line properties are addressed at negative offsets from the
        // butterfly pointer, hence the negated (index - inlineCapacity).
        LValue realIndex = m_out.signExt32To64(
            m_out.neg(m_out.sub(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedInlineCapacity))));
        int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
        ValueFromBlock outOfLineResult = m_out.anchor(
            m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), storage, realIndex, ScaleEight, offsetOfFirstProperty)));
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        // Structure mismatch: take the fully generic property lookup.
        ValueFromBlock slowCaseResult = m_out.anchor(
            vmCall(Int64, operationGetByVal, weakPointer(globalObject), base, property));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, inlineResult, outOfLineResult, slowCaseResult));
    }
13127
13128 void compileGetEnumerableLength()
13129 {
13130 LValue enumerator = lowCell(m_node->child1());
13131 setInt32(m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_indexLength));
13132 }
13133
13134 void compileGetPropertyEnumerator()
13135 {
13136 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
13137 if (m_node->child1().useKind() == CellUse)
13138 setJSValue(vmCall(Int64, operationGetPropertyEnumeratorCell, weakPointer(globalObject), lowCell(m_node->child1())));
13139 else
13140 setJSValue(vmCall(Int64, operationGetPropertyEnumerator, weakPointer(globalObject), lowJSValue(m_node->child1())));
13141 }
13142
    void compileGetEnumeratorStructurePname()
    {
        // Loads the property name at 'index' from the enumerator's cached
        // property-name vector, for indices below endStructurePropertyIndex.
        // Out-of-bounds indices produce the null JSValue.
        LValue enumerator = lowCell(m_node->child1());
        LValue index = lowInt32(m_node->child2());

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock outOfBounds = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Unsigned below-comparison also rejects negative indices.
        m_out.branch(m_out.below(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_endStructurePropertyIndex)),
            usually(inBounds), rarely(outOfBounds));

        LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
        LValue storage = m_out.loadPtr(enumerator, m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVector);
        ValueFromBlock inBoundsResult = m_out.anchor(
            m_out.loadPtr(m_out.baseIndex(m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, storage, m_out.zeroExtPtr(index))));
        m_out.jump(continuation);

        m_out.appendTo(outOfBounds, continuation);
        ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(JSValue::ValueNull));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
    }
13168
    void compileGetEnumeratorGenericPname()
    {
        // Same shape as compileGetEnumeratorStructurePname, but bounded by
        // endGenericPropertyIndex instead of endStructurePropertyIndex.
        // Out-of-bounds indices produce the null JSValue.
        LValue enumerator = lowCell(m_node->child1());
        LValue index = lowInt32(m_node->child2());

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock outOfBounds = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Unsigned below-comparison also rejects negative indices.
        m_out.branch(m_out.below(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_endGenericPropertyIndex)),
            usually(inBounds), rarely(outOfBounds));

        LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
        LValue storage = m_out.loadPtr(enumerator, m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVector);
        ValueFromBlock inBoundsResult = m_out.anchor(
            m_out.loadPtr(m_out.baseIndex(m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, storage, m_out.zeroExtPtr(index))));
        m_out.jump(continuation);

        m_out.appendTo(outOfBounds, continuation);
        ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(JSValue::ValueNull));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
    }
13194
13195 void compileToIndexString()
13196 {
13197 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
13198 LValue index = lowInt32(m_node->child1());
13199 setJSValue(vmCall(Int64, operationToIndexString, weakPointer(globalObject), index));
13200 }
13201
13202 void compileCheckStructureImmediate()
13203 {
13204 LValue structure = lowCell(m_node->child1());
13205 checkStructure(
13206 structure, noValue(), BadCache, m_node->structureSet(),
13207 [this] (RegisteredStructure structure) {
13208 return weakStructure(structure);
13209 });
13210 }
13211
13212 void compileMaterializeNewObject()
13213 {
13214 ObjectMaterializationData& data = m_node->objectMaterializationData();
13215
13216 // Lower the values first, to avoid creating values inside a control flow diamond.
13217
13218 Vector<LValue, 8> values;
13219 for (unsigned i = 0; i < data.m_properties.size(); ++i) {
13220 Edge edge = m_graph.varArgChild(m_node, 1 + i);
13221 switch (data.m_properties[i].kind()) {
13222 case PublicLengthPLoc:
13223 case VectorLengthPLoc:
13224 values.append(lowInt32(edge));
13225 break;
13226 default:
13227 values.append(lowJSValue(edge));
13228 break;
13229 }
13230 }
13231
13232 RegisteredStructureSet set = m_node->structureSet();
13233
13234 Vector<LBasicBlock, 1> blocks(set.size());
13235 for (unsigned i = set.size(); i--;)
13236 blocks[i] = m_out.newBlock();
13237 LBasicBlock dummyDefault = m_out.newBlock();
13238 LBasicBlock outerContinuation = m_out.newBlock();
13239
13240 Vector<SwitchCase, 1> cases(set.size());
13241 for (unsigned i = set.size(); i--;)
13242 cases[i] = SwitchCase(weakStructure(set.at(i)), blocks[i], Weight(1));
13243 m_out.switchInstruction(
13244 lowCell(m_graph.varArgChild(m_node, 0)), cases, dummyDefault, Weight(0));
13245
13246 LBasicBlock outerLastNext = m_out.m_nextBlock;
13247
13248 Vector<ValueFromBlock, 1> results;
13249
13250 for (unsigned i = set.size(); i--;) {
13251 m_out.appendTo(blocks[i], i + 1 < set.size() ? blocks[i + 1] : dummyDefault);
13252
13253 RegisteredStructure structure = set.at(i);
13254
13255 LValue object;
13256 LValue butterfly;
13257
13258 if (structure->outOfLineCapacity() || hasIndexedProperties(structure->indexingType())) {
13259 Allocator cellAllocator;
13260 if (structure->type() == JSType::ArrayType)
13261 cellAllocator = allocatorForNonVirtualConcurrently<JSArray>(vm(), JSArray::allocationSize(structure->inlineCapacity()), AllocatorForMode::AllocatorIfExists);
13262 else
13263 cellAllocator = allocatorForNonVirtualConcurrently<JSFinalObject>(vm(), JSFinalObject::allocationSize(structure->inlineCapacity()), AllocatorForMode::AllocatorIfExists);
13264
13265 bool hasIndexingHeader = hasIndexedProperties(structure->indexingType());
13266 unsigned indexingHeaderSize = 0;
13267 LValue indexingPayloadSizeInBytes = m_out.intPtrZero;
13268 LValue vectorLength = m_out.int32Zero;
13269 LValue publicLength = m_out.int32Zero;
13270 if (hasIndexingHeader) {
13271 indexingHeaderSize = sizeof(IndexingHeader);
13272 for (unsigned i = data.m_properties.size(); i--;) {
13273 PromotedLocationDescriptor descriptor = data.m_properties[i];
13274 switch (descriptor.kind()) {
13275 case PublicLengthPLoc:
13276 publicLength = values[i];
13277 break;
13278 case VectorLengthPLoc:
13279 vectorLength = values[i];
13280 break;
13281 default:
13282 break;
13283 }
13284 }
13285 indexingPayloadSizeInBytes =
13286 m_out.mul(m_out.zeroExtPtr(vectorLength), m_out.intPtrEight);
13287 }
13288
13289 LValue butterflySize = m_out.add(
13290 m_out.constIntPtr(
13291 structure->outOfLineCapacity() * sizeof(JSValue) + indexingHeaderSize),
13292 indexingPayloadSizeInBytes);
13293
13294 LBasicBlock slowPath = m_out.newBlock();
13295 LBasicBlock continuation = m_out.newBlock();
13296
13297 LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
13298
13299 ValueFromBlock noButterfly = m_out.anchor(m_out.intPtrZero);
13300
13301 LValue startOfStorage = allocateHeapCell(
13302 allocatorForSize(vm().jsValueGigacageAuxiliarySpace, butterflySize, slowPath),
13303 slowPath);
13304
13305 LValue fastButterflyValue = m_out.add(
13306 startOfStorage,
13307 m_out.constIntPtr(
13308 structure->outOfLineCapacity() * sizeof(JSValue) + sizeof(IndexingHeader)));
13309
13310 ValueFromBlock haveButterfly = m_out.anchor(fastButterflyValue);
13311
13312 splatWords(
13313 fastButterflyValue,
13314 m_out.constInt32(-structure->outOfLineCapacity() - 1),
13315 m_out.constInt32(-1),
13316 m_out.int64Zero, m_heaps.properties.atAnyNumber());
13317
13318 m_out.store32(vectorLength, fastButterflyValue, m_heaps.Butterfly_vectorLength);
13319
13320 LValue fastObjectValue = allocateObject(
13321 m_out.constIntPtr(cellAllocator.localAllocator()), structure, fastButterflyValue,
13322 slowPath);
13323
13324 ValueFromBlock fastObject = m_out.anchor(fastObjectValue);
13325 ValueFromBlock fastButterfly = m_out.anchor(fastButterflyValue);
13326 m_out.jump(continuation);
13327
13328 m_out.appendTo(slowPath, continuation);
13329
13330 LValue butterflyValue = m_out.phi(pointerType(), noButterfly, haveButterfly);
13331
13332 VM& vm = this->vm();
13333 LValue slowObjectValue;
13334 if (hasIndexingHeader) {
13335 slowObjectValue = lazySlowPath(
13336 [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
13337 return createLazyCallGenerator(vm,
13338 operationNewObjectWithButterflyWithIndexingHeaderAndVectorLength,
13339 locations[0].directGPR(), &vm, CCallHelpers::TrustedImmPtr(structure.get()),
13340 locations[1].directGPR(), locations[2].directGPR());
13341 },
13342 vectorLength, butterflyValue);
13343 } else {
13344 slowObjectValue = lazySlowPath(
13345 [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
13346 return createLazyCallGenerator(vm,
13347 operationNewObjectWithButterfly, locations[0].directGPR(), &vm,
13348 CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR());
13349 },
13350 butterflyValue);
13351 }
13352 ValueFromBlock slowObject = m_out.anchor(slowObjectValue);
13353 ValueFromBlock slowButterfly = m_out.anchor(
13354 m_out.loadPtr(slowObjectValue, m_heaps.JSObject_butterfly));
13355
13356 m_out.jump(continuation);
13357
13358 m_out.appendTo(continuation, lastNext);
13359
13360 object = m_out.phi(pointerType(), fastObject, slowObject);
13361 butterfly = m_out.phi(pointerType(), fastButterfly, slowButterfly);
13362
13363 m_out.store32(publicLength, butterfly, m_heaps.Butterfly_publicLength);
13364
13365 initializeArrayElements(m_out.constInt32(structure->indexingType()), m_out.int32Zero, vectorLength, butterfly);
13366
13367 HashMap<int32_t, LValue, DefaultHash<int32_t>, WTF::UnsignedWithZeroKeyHashTraits<int32_t>> indexMap;
13368 Vector<int32_t> indices;
13369 for (unsigned i = data.m_properties.size(); i--;) {
13370 PromotedLocationDescriptor descriptor = data.m_properties[i];
13371 if (descriptor.kind() != IndexedPropertyPLoc)
13372 continue;
13373 int32_t index = static_cast<int32_t>(descriptor.info());
13374
13375 auto result = indexMap.add(index, values[i]);
13376 DFG_ASSERT(m_graph, m_node, result); // Duplicates are illegal.
13377
13378 indices.append(index);
13379 }
13380
13381 if (!indices.isEmpty()) {
13382 std::sort(indices.begin(), indices.end());
13383
13384 Vector<LBasicBlock> blocksWithStores(indices.size());
13385 Vector<LBasicBlock> blocksWithChecks(indices.size());
13386
13387 for (unsigned i = indices.size(); i--;) {
13388 blocksWithStores[i] = m_out.newBlock();
13389 blocksWithChecks[i] = m_out.newBlock(); // blocksWithChecks[0] is the continuation.
13390 }
13391
13392 LBasicBlock indexLastNext = m_out.m_nextBlock;
13393
13394 for (unsigned i = indices.size(); i--;) {
13395 int32_t index = indices[i];
13396 LValue value = indexMap.get(index);
13397
13398 m_out.branch(
13399 m_out.below(m_out.constInt32(index), publicLength),
13400 unsure(blocksWithStores[i]), unsure(blocksWithChecks[i]));
13401
13402 m_out.appendTo(blocksWithStores[i], blocksWithChecks[i]);
13403
13404 // This has to type-check and convert its inputs, but it cannot do so in a
13405 // way that updates AI. That's a bit annoying, but if you think about how
13406 // sinking works, it's actually not a bad thing. We are virtually guaranteed
13407 // that these type checks will not fail, since the type checks that guarded
13408 // the original stores to the array are still somewhere above this point.
13409 Output::StoreType storeType;
13410 IndexedAbstractHeap* heap;
13411 switch (structure->indexingType()) {
13412 case ALL_INT32_INDEXING_TYPES:
13413 // FIXME: This could use the proven type if we had the Edge for the
13414 // value. https://bugs.webkit.org/show_bug.cgi?id=155311
13415 speculate(BadType, noValue(), nullptr, isNotInt32(value));
13416 storeType = Output::Store64;
13417 heap = &m_heaps.indexedInt32Properties;
13418 break;
13419
13420 case ALL_DOUBLE_INDEXING_TYPES: {
13421 // FIXME: If the source is ValueRep, we should avoid emitting any
13422 // checks. We could also avoid emitting checks if we had the Edge of
13423 // this value. https://bugs.webkit.org/show_bug.cgi?id=155311
13424
13425 LBasicBlock intCase = m_out.newBlock();
13426 LBasicBlock doubleCase = m_out.newBlock();
13427 LBasicBlock continuation = m_out.newBlock();
13428
13429 m_out.branch(isInt32(value), unsure(intCase), unsure(doubleCase));
13430
13431 LBasicBlock lastNext = m_out.appendTo(intCase, doubleCase);
13432
13433 ValueFromBlock intResult =
13434 m_out.anchor(m_out.intToDouble(unboxInt32(value)));
13435 m_out.jump(continuation);
13436
13437 m_out.appendTo(doubleCase, continuation);
13438
13439 speculate(BadType, noValue(), nullptr, isNumber(value));
13440 ValueFromBlock doubleResult = m_out.anchor(unboxDouble(value));
13441 m_out.jump(continuation);
13442
13443 m_out.appendTo(continuation, lastNext);
13444 value = m_out.phi(Double, intResult, doubleResult);
13445 storeType = Output::StoreDouble;
13446 heap = &m_heaps.indexedDoubleProperties;
13447 break;
13448 }
13449
13450 case ALL_CONTIGUOUS_INDEXING_TYPES:
13451 storeType = Output::Store64;
13452 heap = &m_heaps.indexedContiguousProperties;
13453 break;
13454
13455 default:
13456 DFG_CRASH(m_graph, m_node, "Invalid indexing type");
13457 break;
13458 }
13459
13460 m_out.store(value, m_out.address(butterfly, heap->at(index)), storeType);
13461
13462 m_out.jump(blocksWithChecks[i]);
13463 m_out.appendTo(
13464 blocksWithChecks[i], i ? blocksWithStores[i - 1] : indexLastNext);
13465 }
13466 }
13467 } else {
13468 // In the easy case where we can do a one-shot allocation, we simply allocate the
13469 // object to directly have the desired structure.
13470 object = allocateObject(structure);
13471 butterfly = nullptr; // Don't have one, don't need one.
13472 }
13473
13474 BitVector setInlineOffsets;
13475 for (PropertyMapEntry entry : structure->getPropertiesConcurrently()) {
13476 for (unsigned i = data.m_properties.size(); i--;) {
13477 PromotedLocationDescriptor descriptor = data.m_properties[i];
13478 if (descriptor.kind() != NamedPropertyPLoc)
13479 continue;
13480 if (m_graph.identifiers()[descriptor.info()] != entry.key)
13481 continue;
13482
13483 LValue base;
13484 if (isInlineOffset(entry.offset)) {
13485 setInlineOffsets.set(entry.offset);
13486 base = object;
13487 } else
13488 base = butterfly;
13489 storeProperty(values[i], base, descriptor.info(), entry.offset);
13490 break;
13491 }
13492 }
13493 for (unsigned i = structure->inlineCapacity(); i--;) {
13494 if (!setInlineOffsets.get(i))
13495 m_out.store64(m_out.int64Zero, m_out.address(m_heaps.properties.atAnyNumber(), object, offsetRelativeToBase(i)));
13496 }
13497
13498 results.append(m_out.anchor(object));
13499 m_out.jump(outerContinuation);
13500 }
13501
13502 m_out.appendTo(dummyDefault, outerContinuation);
13503 m_out.unreachable();
13504
13505 m_out.appendTo(outerContinuation, outerLastNext);
13506 setJSValue(m_out.phi(pointerType(), results));
13507 mutatorFence();
13508 }
13509
    void compileMaterializeCreateActivation()
    {
        // Rematerializes an allocation-sunk JSLexicalEnvironment: allocates it
        // (inline fast path, lazy slow path on failure) and then stores back
        // every promoted closure variable.
        ObjectMaterializationData& data = m_node->objectMaterializationData();

        // Lower the values first, to avoid creating values inside the
        // fast/slow control-flow diamond below.
        Vector<LValue, 8> values;
        for (unsigned i = 0; i < data.m_properties.size(); ++i)
            values.append(lowJSValue(m_graph.varArgChild(m_node, 2 + i)));

        LValue scope = lowCell(m_graph.varArgChild(m_node, 1));
        SymbolTable* table = m_node->castOperand<SymbolTable*>();
        RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_origin.semantic)->activationStructure());

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        LValue fastObject = allocateObject<JSLexicalEnvironment>(
            JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath);

        m_out.storePtr(scope, fastObject, m_heaps.JSScope_next);
        m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable);


        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        // We ensure allocation sinking explicitly sets bottom values for all field members.
        // Therefore, it doesn't matter what JSValue we pass in as the initialization value
        // because all fields will be overwritten.
        // FIXME: It may be worth creating an operation that calls a constructor on JSLexicalEnvironment that
        // doesn't initialize every slot because we are guaranteed to do that here.
        VM& vm = this->vm();
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationCreateActivationDirect, locations[0].directGPR(), &vm,
                    CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                    CCallHelpers::TrustedImmPtr(table),
                    CCallHelpers::TrustedImm64(JSValue::encode(jsUndefined())));
            }, scope);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue activation = m_out.phi(pointerType(), fastResult, slowResult);
        RELEASE_ASSERT(data.m_properties.size() == table->scopeSize());
        // Store back every promoted closure variable into the activation.
        for (unsigned i = 0; i < data.m_properties.size(); ++i) {
            PromotedLocationDescriptor descriptor = data.m_properties[i];
            ASSERT(descriptor.kind() == ClosureVarPLoc);
            m_out.store64(
                values[i], activation,
                m_heaps.JSLexicalEnvironment_variables[descriptor.info()]);
        }

        if (validationEnabled()) {
            // Validate to make sure every slot in the scope has one value.
            ConcurrentJSLocker locker(table->m_lock);
            for (auto iter = table->begin(locker), end = table->end(locker); iter != end; ++iter) {
                bool found = false;
                for (unsigned i = 0; i < data.m_properties.size(); ++i) {
                    PromotedLocationDescriptor descriptor = data.m_properties[i];
                    ASSERT(descriptor.kind() == ClosureVarPLoc);
                    if (iter->value.scopeOffset().offset() == descriptor.info()) {
                        found = true;
                        break;
                    }
                }
                ASSERT_UNUSED(found, found);
            }
        }

        mutatorFence();
        setJSValue(activation);
    }
13586
    // Shared lowering for materializing an allocation-sunk internal-field
    // object (iterators, promises): allocate the cell (inline fast path, lazy
    // slow path on failure), then store back each promoted internal field.
    template<typename JSClass, typename Operation>
    void compileMaterializeNewInternalFieldObjectImpl(Operation operation)
    {
        ObjectMaterializationData& data = m_node->objectMaterializationData();

        // Lower the field values first, outside the fast/slow diamond.
        Vector<LValue, JSClass::numberOfInternalFields> values;
        ASSERT(data.m_properties.size() == JSClass::numberOfInternalFields);
        for (unsigned i = 0; i < data.m_properties.size(); ++i)
            values.append(lowJSValue(m_graph.varArgChild(m_node, 1 + i)));

        RegisteredStructure structure = m_node->structure();

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        RELEASE_ASSERT(data.m_properties.size() == JSClass::numberOfInternalFields);
        LValue fastObject = allocateObject<JSClass>(structure, m_out.intPtrZero, slowPath);
        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        VM& vm = this->vm();
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operation, locations[0].directGPR(), &vm,
                    CCallHelpers::TrustedImmPtr(structure.get()));
            });
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue object = m_out.phi(pointerType(), fastResult, slowResult);
        // Store back every promoted internal field.
        for (unsigned i = 0; i < data.m_properties.size(); ++i) {
            PromotedLocationDescriptor descriptor = data.m_properties[i];
            ASSERT(descriptor.kind() == InternalFieldObjectPLoc);
            RELEASE_ASSERT(descriptor.info() < JSClass::numberOfInternalFields);
            m_out.store64(values[i], object, m_heaps.JSInternalFieldObjectImpl_internalFields[descriptor.info()]);
        }

        mutatorFence();
        setJSValue(object);
    }
13632
13633 void compileMaterializeNewInternalFieldObject()
13634 {
13635 switch (m_node->structure()->typeInfo().type()) {
13636 case JSArrayIteratorType:
13637 compileMaterializeNewInternalFieldObjectImpl<JSArrayIterator>(operationNewArrayIterator);
13638 break;
13639 case JSMapIteratorType:
13640 compileMaterializeNewInternalFieldObjectImpl<JSMapIterator>(operationNewMapIterator);
13641 break;
13642 case JSSetIteratorType:
13643 compileMaterializeNewInternalFieldObjectImpl<JSSetIterator>(operationNewSetIterator);
13644 break;
13645 case JSPromiseType:
13646 if (m_node->structure()->classInfo() == JSInternalPromise::info())
13647 compileMaterializeNewInternalFieldObjectImpl<JSInternalPromise>(operationNewInternalPromise);
13648 else {
13649 ASSERT(m_node->structure()->classInfo() == JSPromise::info());
13650 compileMaterializeNewInternalFieldObjectImpl<JSPromise>(operationNewPromise);
13651 }
13652 break;
13653 default:
13654 DFG_CRASH(m_graph, m_node, "Bad structure");
13655 }
13656 }
13657
    void compileCheckTraps()
    {
        // Polling-based VM trap check: load the trap bits and, in the rare
        // case that any async-event bits are set, call the trap handler via a
        // lazy slow path.
        ASSERT(Options::usePollingTraps());
        LBasicBlock needTrapHandling = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue trapBits = m_out.load32(m_out.absolute(vm().traps().trapBitsAddress()));
        m_out.branch(m_out.testIsZero32(trapBits, m_out.constInt32(VMTraps::AsyncEvents)),
            usually(continuation), rarely(needTrapHandling));

        LBasicBlock lastNext = m_out.appendTo(needTrapHandling, continuation);

        VM& vm = this->vm();
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        // No result register (InvalidGPRReg): the handler is called purely for
        // its side effects.
        lazySlowPath(
            [=, &vm] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm, operationHandleTraps, InvalidGPRReg, globalObject);
            });
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
13680
    void compileRegExpExec()
    {
        // Chooses among three runtime operations depending on how precisely
        // the children are speculated: RegExpObject + String, RegExpObject +
        // any value, or fully generic.
        LValue globalObject = lowCell(m_node->child1());

        if (m_node->child2().useKind() == RegExpObjectUse) {
            LValue base = lowRegExpObject(m_node->child2());

            if (m_node->child3().useKind() == StringUse) {
                LValue argument = lowString(m_node->child3());
                LValue result = vmCall(Int64, operationRegExpExecString, globalObject, base, argument);
                setJSValue(result);
                return;
            }

            LValue argument = lowJSValue(m_node->child3());
            LValue result = vmCall(Int64, operationRegExpExec, globalObject, base, argument);
            setJSValue(result);
            return;
        }

        // Generic case: neither child is usefully speculated.
        LValue base = lowJSValue(m_node->child2());
        LValue argument = lowJSValue(m_node->child3());
        LValue result = vmCall(Int64, operationRegExpExecGeneric, globalObject, base, argument);
        setJSValue(result);
    }
13706
13707 void compileRegExpExecNonGlobalOrSticky()
13708 {
13709 LValue globalObject = lowCell(m_node->child1());
13710 LValue argument = lowString(m_node->child2());
13711 LValue result = vmCall(Int64, operationRegExpExecNonGlobalOrSticky, globalObject, frozenPointer(m_node->cellOperand()), argument);
13712 setJSValue(result);
13713 }
13714
13715 void compileRegExpMatchFastGlobal()
13716 {
13717 LValue globalObject = lowCell(m_node->child1());
13718 LValue argument = lowString(m_node->child2());
13719 LValue result = vmCall(Int64, operationRegExpMatchFastGlobalString, globalObject, frozenPointer(m_node->cellOperand()), argument);
13720 setJSValue(result);
13721 }
13722
    void compileRegExpTest()
    {
        // Mirrors compileRegExpExec, but the operations return a boolean
        // (Int32) rather than a match result object.
        LValue globalObject = lowCell(m_node->child1());

        if (m_node->child2().useKind() == RegExpObjectUse) {
            LValue base = lowRegExpObject(m_node->child2());

            if (m_node->child3().useKind() == StringUse) {
                LValue argument = lowString(m_node->child3());
                LValue result = vmCall(Int32, operationRegExpTestString, globalObject, base, argument);
                setBoolean(result);
                return;
            }

            LValue argument = lowJSValue(m_node->child3());
            LValue result = vmCall(Int32, operationRegExpTest, globalObject, base, argument);
            setBoolean(result);
            return;
        }

        // Generic case: neither child is usefully speculated.
        LValue base = lowJSValue(m_node->child2());
        LValue argument = lowJSValue(m_node->child3());
        LValue result = vmCall(Int32, operationRegExpTestGeneric, globalObject, base, argument);
        setBoolean(result);
    }
13748
13749 void compileRegExpMatchFast()
13750 {
13751 LValue globalObject = lowCell(m_node->child1());
13752 LValue base = lowRegExpObject(m_node->child2());
13753 LValue argument = lowString(m_node->child3());
13754 LValue result = vmCall(Int64, operationRegExpMatchFastString, globalObject, base, argument);
13755 setJSValue(result);
13756 }
13757
    void compileNewRegexp()
    {
        // Creates a RegExpObject for a constant RegExp cell, with the given
        // lastIndex. Fast path allocates inline; slow path calls the runtime
        // via a lazy slow path.
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        FrozenValue* regexp = m_node->cellOperand();
        LValue lastIndex = lowJSValue(m_node->child1());
        ASSERT(regexp->cell()->inherits<RegExp>(vm()));

        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowCase);

        auto structure = m_graph.registerStructure(globalObject->regExpStructure());
        LValue fastResultValue = allocateObject<RegExpObject>(structure, m_out.intPtrZero, slowCase);
        // The regExp field doubles as the lastIndexIsNotWritable flag carrier;
        // a fresh object has the flag clear, so storing the raw pointer is fine.
        m_out.storePtr(frozenPointer(regexp), fastResultValue, m_heaps.RegExpObject_regExpAndLastIndexIsNotWritableFlag);
        m_out.store64(lastIndex, fastResultValue, m_heaps.RegExpObject_lastIndex);
        mutatorFence();
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        VM& vm = this->vm();
        RegExp* regexpCell = regexp->cast<RegExp*>();
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationNewRegexpWithLastIndex, locations[0].directGPR(), globalObject,
                    CCallHelpers::TrustedImmPtr(regexpCell), locations[1].directGPR());
            }, lastIndex);
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
13793
13794 void compileSetFunctionName()
13795 {
13796 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
13797 vmCall(Void, operationSetFunctionName, weakPointer(globalObject),
13798 lowCell(m_node->child1()), lowJSValue(m_node->child2()));
13799 }
13800
    void compileStringReplace()
    {
        // Three tiers of specialization:
        // 1. String.replace(RegExpObject, "") with a constant empty replacement,
        // 2. String.replace(RegExpObject, String),
        // 3. fully generic replace.
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        if (m_node->child1().useKind() == StringUse
            && m_node->child2().useKind() == RegExpObjectUse
            && m_node->child3().useKind() == StringUse) {

            // If the replacement is a known-constant empty string, a dedicated
            // operation avoids building the replacement at runtime.
            if (JSString* replace = m_node->child3()->dynamicCastConstant<JSString*>(vm())) {
                if (!replace->length()) {
                    LValue string = lowString(m_node->child1());
                    LValue regExp = lowRegExpObject(m_node->child2());

                    LValue result = vmCall(pointerType(), operationStringProtoFuncReplaceRegExpEmptyStr, weakPointer(globalObject), string, regExp);

                    setJSValue(result);
                    return;
                }
            }

            LValue string = lowString(m_node->child1());
            LValue regExp = lowRegExpObject(m_node->child2());
            LValue replace = lowString(m_node->child3());

            LValue result = vmCall(pointerType(), operationStringProtoFuncReplaceRegExpString, weakPointer(globalObject), string, regExp, replace);

            setJSValue(result);
            return;
        }

        // The search term may still be a speculated String even when the other
        // children are generic.
        LValue search;
        if (m_node->child2().useKind() == StringUse)
            search = lowString(m_node->child2());
        else
            search = lowJSValue(m_node->child2());

        LValue result = vmCall(
            pointerType(), operationStringProtoFuncReplaceGeneric,
            weakPointer(globalObject),
            lowJSValue(m_node->child1()), search,
            lowJSValue(m_node->child3()));

        setJSValue(result);
    }
13844
13845 void compileGetRegExpObjectLastIndex()
13846 {
13847 setJSValue(m_out.load64(lowRegExpObject(m_node->child1()), m_heaps.RegExpObject_lastIndex));
13848 }
13849
    void compileSetRegExpObjectLastIndex()
    {
        // Unless the node proved that lastIndex is writable, check the
        // not-writable flag (packed into the regExp pointer field) and OSR
        // exit with ExoticObjectMode when it is set.
        if (!m_node->ignoreLastIndexIsWritable()) {
            LValue regExp = lowRegExpObject(m_node->child1());
            LValue value = lowJSValue(m_node->child2());

            speculate(
                ExoticObjectMode, noValue(), nullptr,
                m_out.testNonZeroPtr(
                    m_out.loadPtr(regExp, m_heaps.RegExpObject_regExpAndLastIndexIsNotWritableFlag),
                    m_out.constIntPtr(RegExpObject::lastIndexIsNotWritableFlag)));

            m_out.store64(value, regExp, m_heaps.RegExpObject_lastIndex);
            return;
        }

        // Writability already proven: store unconditionally.
        m_out.store64(lowJSValue(m_node->child2()), lowCell(m_node->child1()), m_heaps.RegExpObject_lastIndex);
    }
13868
    // Emits a ShadowChicken prologue log packet: records the current machine
    // frame, its caller frame (loaded from slot 0 of the frame), the callee,
    // and the scope (child1) so the debugger can reconstruct tail-deleted frames.
    void compileLogShadowChickenPrologue()
    {
        LValue packet = ensureShadowChickenPacket();
        LValue scope = lowCell(m_node->child1());

        m_out.storePtr(m_callFrame, packet, m_heaps.ShadowChicken_Packet_frame);
        // VirtualRegister(0) is the caller-frame slot of the current frame.
        m_out.storePtr(m_out.loadPtr(addressFor(VirtualRegister(0))), packet, m_heaps.ShadowChicken_Packet_callerFrame);
        m_out.storePtr(m_out.loadPtr(payloadFor(VirtualRegister(CallFrameSlot::callee))), packet, m_heaps.ShadowChicken_Packet_callee);
        m_out.storePtr(scope, packet, m_heaps.ShadowChicken_Packet_scope);
    }
13879
    // Emits a ShadowChicken tail-call log packet. The packet's callee field is
    // set to the distinguished tail marker, and we record |this| (child1), the
    // scope (child2), the code block, and a call site index for the tail site.
    void compileLogShadowChickenTail()
    {
        LValue packet = ensureShadowChickenPacket();
        LValue thisValue = lowJSValue(m_node->child1());
        LValue scope = lowCell(m_node->child2());
        CallSiteIndex callSiteIndex = m_ftlState.jitCode->common.codeOrigins->addCodeOrigin(m_origin.semantic);

        m_out.storePtr(m_callFrame, packet, m_heaps.ShadowChicken_Packet_frame);
        // The tail marker in the callee slot tells the log reader this packet
        // describes a tail call rather than a prologue.
        m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(ShadowChicken::Packet::tailMarker())), packet, m_heaps.ShadowChicken_Packet_callee);
        m_out.store64(thisValue, packet, m_heaps.ShadowChicken_Packet_thisValue);
        m_out.storePtr(scope, packet, m_heaps.ShadowChicken_Packet_scope);
        // We don't want the CodeBlock to have a weak pointer to itself because
        // that would cause it to always get collected.
        m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), packet, m_heaps.ShadowChicken_Packet_codeBlock);
        m_out.store32(m_out.constInt32(callSiteIndex.bits()), packet, m_heaps.ShadowChicken_Packet_callSiteIndex);
    }
13896
    // Records a RegExp match result into the global object's regexp match
    // cache: the regexp, the subject string, and the [start, end) range of the
    // match. Also clears the "reified" flag so the cached result is treated as
    // not-yet-materialized.
    void compileRecordRegExpCachedResult()
    {
        // Children, in order: globalObject, regExp, string, start, end.
        Edge globalObjectEdge = m_graph.varArgChild(m_node, 0);
        Edge regExpEdge = m_graph.varArgChild(m_node, 1);
        Edge stringEdge = m_graph.varArgChild(m_node, 2);
        Edge startEdge = m_graph.varArgChild(m_node, 3);
        Edge endEdge = m_graph.varArgChild(m_node, 4);

        LValue globalObject = lowCell(globalObjectEdge);
        LValue regExp = lowCell(regExpEdge);
        LValue string = lowCell(stringEdge);
        LValue start = lowInt32(startEdge);
        LValue end = lowInt32(endEdge);

        m_out.storePtr(regExp, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_lastRegExp);
        m_out.storePtr(string, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_lastInput);
        m_out.store32(start, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_result_start);
        m_out.store32(end, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_result_end);
        // The reified flag is a byte; store 0 to mark the result as unreified.
        m_out.store32As8(
            m_out.constInt32(0),
            m_out.address(globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_reified));
    }
13919
13920 struct ArgumentsLength {
13921 ArgumentsLength()
13922 : isKnown(false)
13923 , known(UINT_MAX)
13924 , value(nullptr)
13925 {
13926 }
13927
13928 bool isKnown;
13929 unsigned known;
13930 LValue value;
13931 };
13932 ArgumentsLength getArgumentsLength(InlineCallFrame* inlineCallFrame)
13933 {
13934 ArgumentsLength length;
13935
13936 if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
13937 length.known = static_cast<unsigned>(inlineCallFrame->argumentCountIncludingThis - 1);
13938 length.isKnown = true;
13939 length.value = m_out.constInt32(length.known);
13940 } else {
13941 length.known = UINT_MAX;
13942 length.isKnown = false;
13943
13944 VirtualRegister argumentCountRegister;
13945 if (!inlineCallFrame)
13946 argumentCountRegister = VirtualRegister(CallFrameSlot::argumentCountIncludingThis);
13947 else
13948 argumentCountRegister = inlineCallFrame->argumentCountRegister;
13949 length.value = m_out.sub(m_out.load32(payloadFor(argumentCountRegister)), m_out.int32One);
13950 }
13951
13952 return length;
13953 }
13954
    // Convenience overload: computes the arguments length for the frame of the
    // current node's semantic origin.
    ArgumentsLength getArgumentsLength()
    {
        return getArgumentsLength(m_origin.semantic.inlineCallFrame());
    }
13959
13960 LValue getCurrentCallee()
13961 {
13962 if (InlineCallFrame* frame = m_origin.semantic.inlineCallFrame()) {
13963 if (frame->isClosureCall)
13964 return m_out.loadPtr(addressFor(frame->calleeRecovery.virtualRegister()));
13965 return weakPointer(frame->calleeRecovery.constant().asCell());
13966 }
13967 return m_out.loadPtr(addressFor(VirtualRegister(CallFrameSlot::callee)));
13968 }
13969
13970 LValue getArgumentsStart(InlineCallFrame* inlineCallFrame, unsigned offset = 0)
13971 {
13972 VirtualRegister start = AssemblyHelpers::argumentsStart(inlineCallFrame) + offset;
13973 return addressFor(start).value();
13974 }
13975
    // Convenience overload: returns the arguments start for the frame of the
    // current node's semantic origin.
    LValue getArgumentsStart()
    {
        return getArgumentsStart(m_origin.semantic.inlineCallFrame());
    }
13980
    // Speculates that |structureDiscriminant| matches one of the structures in
    // |set|, OSR exiting with |exitKind| otherwise. |weakStructureDiscriminant|
    // maps a RegisteredStructure to the LValue to compare against (allowing
    // callers to compare either structure pointers or structure IDs).
    //
    // Empty set: the code is unreachable, so terminate. Singleton set: a single
    // not-equal speculation. Larger sets: a chain of equality branches to a
    // shared continuation, with a final speculation on the last element.
    template<typename Functor>
    void checkStructure(
        LValue structureDiscriminant, const FormattedValue& formattedValue, ExitKind exitKind,
        const RegisteredStructureSet& set, const Functor& weakStructureDiscriminant)
    {
        if (set.isEmpty()) {
            terminate(exitKind);
            return;
        }

        if (set.size() == 1) {
            speculate(
                exitKind, formattedValue, nullptr,
                m_out.notEqual(structureDiscriminant, weakStructureDiscriminant(set[0])));
            return;
        }

        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
        // Each structure except the last gets its own equality test; a match
        // jumps straight to the continuation, a mismatch falls through to test
        // the next candidate.
        for (unsigned i = 0; i < set.size() - 1; ++i) {
            LBasicBlock nextStructure = m_out.newBlock();
            m_out.branch(
                m_out.equal(structureDiscriminant, weakStructureDiscriminant(set[i])),
                unsure(continuation), unsure(nextStructure));
            m_out.appendTo(nextStructure);
        }

        // The last candidate is a speculation: if it doesn't match either, exit.
        speculate(
            exitKind, formattedValue, nullptr,
            m_out.notEqual(structureDiscriminant, weakStructureDiscriminant(set.last())));

        m_out.jump(continuation);
        m_out.appendTo(continuation, lastNext);
    }
14016
    // Converts a boxed JSValue to an Int32 under either NumberUse or
    // NotCellNorBigIntUse. Int32 values unbox directly; doubles go through
    // doubleToInt32. Under NotCellNorBigIntUse, remaining non-number values are
    // type-checked to exclude cells (and BigInt32 when enabled) and then
    // converted as booleans: true -> 1, everything else -> 0.
    LValue numberOrNotCellNorBigIntToInt32(Edge edge, LValue value)
    {
        LBasicBlock intCase = m_out.newBlock();
        LBasicBlock notIntCase = m_out.newBlock();
        LBasicBlock doubleCase = nullptr;
        LBasicBlock notNumberCase = nullptr;
        // Only the NotCellNorBigIntUse path needs separate blocks for the
        // double and not-number cases; NumberUse handles doubles inline.
        if (edge.useKind() == NotCellNorBigIntUse) {
            doubleCase = m_out.newBlock();
            notNumberCase = m_out.newBlock();
        }
        LBasicBlock continuation = m_out.newBlock();

        Vector<ValueFromBlock> results;

        m_out.branch(isNotInt32(value), unsure(notIntCase), unsure(intCase));

        LBasicBlock lastNext = m_out.appendTo(intCase, notIntCase);
        results.append(m_out.anchor(unboxInt32(value)));
        m_out.jump(continuation);

        if (edge.useKind() == NumberUse) {
            // NumberUse: anything that is not an int32 must be a double; the
            // type check exits if it is a cell, misc, or BigInt32.
            m_out.appendTo(notIntCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), edge, SpecBytecodeNumber, isCellOrMiscOrBigInt32(value));
            results.append(m_out.anchor(doubleToInt32(unboxDouble(value))));
            m_out.jump(continuation);
        } else {
            // NotCellNorBigIntUse: split non-int32 values into double vs.
            // other (boolean/undefined/null) cases.
            m_out.appendTo(notIntCase, doubleCase);
            m_out.branch(isCellOrMiscOrBigInt32(value, provenType(edge)), unsure(notNumberCase), unsure(doubleCase));

            m_out.appendTo(doubleCase, notNumberCase);
            results.append(m_out.anchor(doubleToInt32(unboxDouble(value))));
            m_out.jump(continuation);

            m_out.appendTo(notNumberCase, continuation);

            // Exclude cells (and BigInt32 when in use) via type checks that
            // OSR exit on failure.
            FTL_TYPE_CHECK(jsValueValue(value), edge, ~SpecCellCheck, isCell(value));
#if USE(BIGINT32)
            FTL_TYPE_CHECK(jsValueValue(value), edge, ~SpecCellCheck & ~SpecBigInt, isBigInt32(value));
#endif

            // Remaining values convert like booleans: true -> 1, else 0.
            LValue specialResult = m_out.select(
                m_out.equal(value, m_out.constInt64(JSValue::encode(jsBoolean(true)))),
                m_out.int32One, m_out.int32Zero);
            results.append(m_out.anchor(specialResult));
            m_out.jump(continuation);
        }

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, results);
    }
14067
    // Loads the 64-bit boxed value at the given property offset from the given
    // property storage (inline or out-of-line butterfly base).
    LValue loadProperty(LValue storage, unsigned identifierNumber, PropertyOffset offset)
    {
        return m_out.load64(addressOfProperty(storage, identifierNumber, offset));
    }
14072
    // Stores a 64-bit boxed value at the given property offset into the given
    // property storage (inline or out-of-line butterfly base).
    void storeProperty(
        LValue value, LValue storage, unsigned identifierNumber, PropertyOffset offset)
    {
        m_out.store64(value, addressOfProperty(storage, identifierNumber, offset));
    }
14078
    // Computes the typed address of a property slot: the storage base plus the
    // byte displacement for |offset|, tagged with the per-identifier abstract
    // heap so alias analysis can distinguish different properties.
    TypedPointer addressOfProperty(
        LValue storage, unsigned identifierNumber, PropertyOffset offset)
    {
        return m_out.address(
            m_heaps.properties[identifierNumber], storage, offsetRelativeToBase(offset));
    }
14085
    // Produces the property storage to write into for a structure transition
    // from |previousStructure| to |nextStructure|:
    //  - inline offsets store directly into the object,
    //  - same out-of-line capacity reuses the existing butterfly,
    //  - otherwise (re)allocate out-of-line storage, then nuke the structure
    //    and install the new butterfly before returning it.
    LValue storageForTransition(
        LValue object, PropertyOffset offset,
        Structure* previousStructure, Structure* nextStructure)
    {
        if (isInlineOffset(offset))
            return object;

        if (previousStructure->outOfLineCapacity() == nextStructure->outOfLineCapacity())
            return m_out.loadPtr(object, m_heaps.JSObject_butterfly);

        LValue result;
        if (!previousStructure->outOfLineCapacity())
            result = allocatePropertyStorage(object, previousStructure);
        else {
            result = reallocatePropertyStorage(
                object, m_out.loadPtr(object, m_heaps.JSObject_butterfly),
                previousStructure, nextStructure);
        }

        // Installs the new butterfly with the structure nuked, so concurrent
        // readers never observe a butterfly/structure mismatch.
        nukeStructureAndSetButterfly(result, object);
        return result;
    }
14108
    // Fills butterfly elements [begin, end) with the appropriate "hole" value
    // for the array's indexing type: PNaN for double arrays, the empty JSValue
    // otherwise. When the indexing type is a compile-time constant we can pick
    // the hole (and the precise abstract heap) statically; Undecided arrays
    // need no initialization at all.
    void initializeArrayElements(LValue indexingType, LValue begin, LValue end, LValue butterfly)
    {

        if (begin == end)
            return;

        if (indexingType->hasInt32()) {
            IndexingType rawIndexingType = static_cast<IndexingType>(indexingType->asInt32());
            if (hasUndecided(rawIndexingType))
                return;
            IndexedAbstractHeap* heap = m_heaps.forIndexingType(rawIndexingType);
            DFG_ASSERT(m_graph, m_node, heap);

            LValue hole;
            if (hasDouble(rawIndexingType))
                hole = m_out.constInt64(bitwise_cast<int64_t>(PNaN));
            else
                hole = m_out.constInt64(JSValue::encode(JSValue()));

            splatWords(butterfly, begin, end, hole, heap->atAnyIndex());
        } else {
            // Dynamic indexing type: select the hole value at runtime based on
            // whether the shape bits say DoubleShape; splat over the root heap
            // since the precise heap is unknown.
            LValue hole = m_out.select(
                m_out.equal(m_out.bitAnd(indexingType, m_out.constInt32(IndexingShapeMask)), m_out.constInt32(DoubleShape)),
                m_out.constInt64(bitwise_cast<int64_t>(PNaN)),
                m_out.constInt64(JSValue::encode(JSValue())));
            splatWords(butterfly, begin, end, hole, m_heaps.root);
        }
    }
14137
    // Stores |value| into the 64-bit words of |base| at indices [begin, end).
    // When both bounds are compile-time constants and the range is small, the
    // stores are fully unrolled; otherwise a loop is emitted.
    void splatWords(LValue base, LValue begin, LValue end, LValue value, const AbstractHeap& heap)
    {
        const uint64_t unrollingLimit = 10;
        if (begin->hasInt() && end->hasInt()) {
            uint64_t beginConst = static_cast<uint64_t>(begin->asInt());
            uint64_t endConst = static_cast<uint64_t>(end->asInt());

            if (endConst - beginConst <= unrollingLimit) {
                // Unrolled path: one store per word, with statically computed
                // byte offsets.
                for (uint64_t i = beginConst; i < endConst; ++i) {
                    LValue pointer = m_out.add(base, m_out.constIntPtr(i * sizeof(uint64_t)));
                    m_out.store64(value, TypedPointer(heap, pointer));
                }
                return;
            }
        }

        LBasicBlock initLoop = m_out.newBlock();
        LBasicBlock initDone = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(initLoop);

        // Loop: |index| counts down from end to begin (it is only a trip
        // counter), while |pointer| walks upward from base + begin * 8, so the
        // stores fill ascending addresses.
        ValueFromBlock originalIndex = m_out.anchor(end);
        ValueFromBlock originalPointer = m_out.anchor(
            m_out.add(base, m_out.shl(m_out.signExt32ToPtr(begin), m_out.constInt32(3))));
        m_out.branch(m_out.notEqual(end, begin), unsure(initLoop), unsure(initDone));

        m_out.appendTo(initLoop, initDone);
        LValue index = m_out.phi(Int32, originalIndex);
        LValue pointer = m_out.phi(pointerType(), originalPointer);

        m_out.store64(value, TypedPointer(heap, pointer));

        LValue nextIndex = m_out.sub(index, m_out.int32One);
        m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
        m_out.addIncomingToPhi(pointer, m_out.anchor(m_out.add(pointer, m_out.intPtrEight)));
        m_out.branch(
            m_out.notEqual(nextIndex, begin), unsure(initLoop), unsure(initDone));

        m_out.appendTo(initDone, lastNext);
    }
14178
    // Allocates initial out-of-line property storage for |object|. Structures
    // that could have an indexing header must go through the complex slow-path
    // operation; otherwise we allocate inline and zero the new property words
    // (which live at negative indices relative to the returned butterfly).
    LValue allocatePropertyStorage(LValue object, Structure* previousStructure)
    {
        if (previousStructure->couldHaveIndexingHeader()) {
            return vmCall(
                pointerType(),
                operationAllocateComplexPropertyStorageWithInitialCapacity,
                m_vmValue, object);
        }

        LValue result = allocatePropertyStorageWithSizeImpl(initialOutOfLineCapacity);

        // Zero-fill the freshly allocated out-of-line slots, i.e. word indices
        // [-initialOutOfLineCapacity - 1, -1) relative to the butterfly.
        splatWords(
            result,
            m_out.constInt32(-initialOutOfLineCapacity - 1), m_out.constInt32(-1),
            m_out.int64Zero, m_heaps.properties.atAnyNumber());

        return result;
    }
14197
    // Grows |object|'s out-of-line property storage from |previous|'s capacity
    // to |next|'s (which must be exactly outOfLineGrowthFactor times larger).
    // Structures that could have an indexing header take the complex slow-path
    // operation; otherwise we allocate the larger storage, copy the existing
    // property words across, and zero the newly added slots.
    LValue reallocatePropertyStorage(
        LValue object, LValue oldStorage, Structure* previous, Structure* next)
    {
        size_t oldSize = previous->outOfLineCapacity();
        size_t newSize = oldSize * outOfLineGrowthFactor;

        ASSERT_UNUSED(next, newSize == next->outOfLineCapacity());

        if (previous->couldHaveIndexingHeader()) {
            LValue newAllocSize = m_out.constIntPtr(newSize);
            return vmCall(pointerType(), operationAllocateComplexPropertyStorage, m_vmValue, object, newAllocSize);
        }

        LValue result = allocatePropertyStorageWithSizeImpl(newSize);

        // NOTE(review): -sizeof(...) negates in size_t (wraps modulo 2^64) and
        // is then converted to ptrdiff_t, yielding the intended negative byte
        // offset on the two's-complement targets WebKit supports. Out-of-line
        // properties live at negative offsets from the butterfly pointer.
        ptrdiff_t headerSize = -sizeof(IndexingHeader) - sizeof(void*);
        ptrdiff_t endStorage = headerSize - static_cast<ptrdiff_t>(oldSize * sizeof(JSValue));

        // Copy the existing oldSize property words into the new storage.
        for (ptrdiff_t offset = headerSize; offset > endStorage; offset -= sizeof(void*)) {
            LValue loaded =
                m_out.loadPtr(m_out.address(m_heaps.properties.atAnyNumber(), oldStorage, offset));
            m_out.storePtr(loaded, m_out.address(m_heaps.properties.atAnyNumber(), result, offset));
        }

        // Zero-fill only the newly added slots: word indices
        // [-newSize - 1, -oldSize - 1) relative to the butterfly.
        splatWords(
            result,
            m_out.constInt32(-newSize - 1), m_out.constInt32(-oldSize - 1),
            m_out.int64Zero, m_heaps.properties.atAnyNumber());

        return result;
    }
14229
    // Allocates simple (no indexing header) out-of-line property storage of
    // |sizeInValues| JSValue slots. The fast path bump-allocates from the
    // JSValue gigacage auxiliary space and returns a butterfly pointer (which
    // points just past the allocation's indexing-header region); the slow path
    // lazily emits a call to the matching operation.
    LValue allocatePropertyStorageWithSizeImpl(size_t sizeInValues)
    {
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        size_t sizeInBytes = sizeInValues * sizeof(JSValue);
        Allocator allocator = vm().jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(sizeInBytes, AllocatorForMode::AllocatorIfExists);
        LValue startOfStorage = allocateHeapCell(
            m_out.constIntPtr(allocator.localAllocator()), slowPath);
        // The butterfly pointer points past the payload + indexing header.
        ValueFromBlock fastButterfly = m_out.anchor(
            m_out.add(m_out.constIntPtr(sizeInBytes + sizeof(IndexingHeader)), startOfStorage));
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        LValue slowButterflyValue;
        VM& vm = this->vm();
        // The initial-capacity case has a dedicated operation that needs no
        // size argument; other sizes pass sizeInValues explicitly.
        if (sizeInValues == initialOutOfLineCapacity) {
            slowButterflyValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationAllocateSimplePropertyStorageWithInitialCapacity,
                        locations[0].directGPR(), &vm);
                });
        } else {
            slowButterflyValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationAllocateSimplePropertyStorage, locations[0].directGPR(), &vm,
                        CCallHelpers::TrustedImmPtr(sizeInValues));
                });
        }
        ValueFromBlock slowButterfly = m_out.anchor(slowButterflyValue);

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        return m_out.phi(pointerType(), fastButterfly, slowButterfly);
    }
14272
    // Emits an inline-cached get-by-id of the current node's identifier on
    // |base|, using a B3 patchpoint that embeds a JITGetByIdGenerator. The fast
    // path is the IC itself; the late path wires up the slow-path call to the
    // appropriate optimizing operation. Returns the boxed result.
    LValue getById(LValue base, AccessType type)
    {
        Node* node = m_node;
        CacheableIdentifier identifier = node->cacheableIdentifier();

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        // The IC may box/unbox values, so it needs the tag registers pinned.
        patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
        patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
        // Data ICs need one scratch GPR to hold the StructureStubInfo pointer.
        patchpoint->numGPScratchRegisters = JITCode::useDataIC(JITType::FTLJIT) ? 1 : 0;

        // FIXME: If this is a GetByIdFlush/GetByIdDirectFlush, we might get some performance boost if we claim that it
        // clobbers volatile registers late. It's not necessary for correctness, though, since the
        // IC code is super smart about saving registers.
        // https://bugs.webkit.org/show_bug.cgi?id=152848

        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        CodeOrigin semanticNodeOrigin = node->origin.semantic;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.codeOrigins->addUniqueCallSiteIndex(semanticNodeOrigin);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // This is the exit for call IC's created by the getById for getters. We don't have
                // to do anything weird other than call this, since it will associate the exit with
                // the callsite index.
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                GPRReg stubInfoGPR = JITCode::useDataIC(JITType::FTLJIT) ? params.gpScratch(0) : InvalidGPRReg;

                // params[0] is the result; params[1] is the base.
                auto generator = Box<JITGetByIdGenerator>::create(
                    jit.codeBlock(), JITType::FTLJIT, semanticNodeOrigin, callSiteIndex,
                    params.unavailableRegisters(), identifier, JSValueRegs(params[1].gpr()),
                    JSValueRegs(params[0].gpr()), stubInfoGPR, type);

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        auto optimizationFunction = appropriateOptimizingGetByIdFunction(type);

                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall;
                        // Data ICs call through the stub info's slow operation
                        // slot; code ICs call the optimizing function directly.
                        if (JITCode::useDataIC(JITType::FTLJIT)) {
                            jit.move(CCallHelpers::TrustedImmPtr(generator->stubInfo()), stubInfoGPR);
                            generator->stubInfo()->m_slowOperation = optimizationFunction;
                            slowPathCall = callOperation(
                                *state, params.unavailableRegisters(), jit, semanticNodeOrigin,
                                exceptions.get(), CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), params[0].gpr(),
                                jit.codeBlock()->globalObjectFor(semanticNodeOrigin),
                                stubInfoGPR, params[1].gpr(),
                                CCallHelpers::TrustedImmPtr(identifier.rawBits())).call();
                        } else {
                            slowPathCall = callOperation(
                                *state, params.unavailableRegisters(), jit, semanticNodeOrigin,
                                exceptions.get(), optimizationFunction, params[0].gpr(),
                                jit.codeBlock()->globalObjectFor(semanticNodeOrigin),
                                CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                                CCallHelpers::TrustedImmPtr(identifier.rawBits())).call();
                        }
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });

        return patchpoint;
    }
14361
    // Emits an inline-cached get-by-id-with-this (Reflect.get-style receiver)
    // of the current node's identifier. Mirrors getById() but uses
    // JITGetByIdWithThisGenerator and additionally plumbs |thisValue| through
    // the patchpoint. Returns the boxed result.
    LValue getByIdWithThis(LValue base, LValue thisValue)
    {
        Node* node = m_node;
        CacheableIdentifier identifier = node->cacheableIdentifier();

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        patchpoint->appendSomeRegister(thisValue);
        // The IC may box/unbox values, so it needs the tag registers pinned.
        patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
        patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        // Data ICs need one scratch GPR to hold the StructureStubInfo pointer.
        patchpoint->numGPScratchRegisters = JITCode::useDataIC(JITType::FTLJIT) ? 1 : 0;

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        CodeOrigin semanticNodeOrigin = node->origin.semantic;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.codeOrigins->addUniqueCallSiteIndex(semanticNodeOrigin);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // This is the exit for call IC's created by the getById for getters. We don't have
                // to do anything weird other than call this, since it will associate the exit with
                // the callsite index.
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                GPRReg stubInfoGPR = JITCode::useDataIC(JITType::FTLJIT) ? params.gpScratch(0) : InvalidGPRReg;

                // params[0] is the result; params[1] is the base; params[2] is
                // the |this| value.
                auto generator = Box<JITGetByIdWithThisGenerator>::create(
                    jit.codeBlock(), JITType::FTLJIT, semanticNodeOrigin, callSiteIndex,
                    params.unavailableRegisters(), identifier, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), stubInfoGPR);

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        auto optimizationFunction = operationGetByIdWithThisOptimize;

                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall;
                        // Data ICs call through the stub info's slow operation
                        // slot; code ICs call the optimizing function directly.
                        if (JITCode::useDataIC(JITType::FTLJIT)) {
                            jit.move(CCallHelpers::TrustedImmPtr(generator->stubInfo()), stubInfoGPR);
                            generator->stubInfo()->m_slowOperation = optimizationFunction;
                            slowPathCall = callOperation(
                                *state, params.unavailableRegisters(), jit, semanticNodeOrigin,
                                exceptions.get(), CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), params[0].gpr(),
                                jit.codeBlock()->globalObjectFor(semanticNodeOrigin),
                                stubInfoGPR, params[1].gpr(),
                                params[2].gpr(), CCallHelpers::TrustedImmPtr(identifier.rawBits())).call();
                        } else {
                            slowPathCall = callOperation(
                                *state, params.unavailableRegisters(), jit, semanticNodeOrigin,
                                exceptions.get(), optimizationFunction, params[0].gpr(),
                                jit.codeBlock()->globalObjectFor(semanticNodeOrigin),
                                CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                                params[2].gpr(), CCallHelpers::TrustedImmPtr(identifier.rawBits())).call();
                        }
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });

        return patchpoint;
    }
14445
14446 LValue isFastTypedArray(LValue object)
14447 {
14448 return m_out.equal(
14449 m_out.load32(object, m_heaps.JSArrayBufferView_mode),
14450 m_out.constInt32(FastTypedArray));
14451 }
14452
    // Computes storage[index] (plus an optional byte offset) as a TypedPointer,
    // zero-extending the 32-bit index to pointer width. The edge's proven value
    // is passed along so the heap can be refined when the index is constant.
    TypedPointer baseIndex(IndexedAbstractHeap& heap, LValue storage, LValue index, Edge edge, ptrdiff_t offset = 0)
    {
        return m_out.baseIndex(
            heap, storage, m_out.zeroExtPtr(index), provenValue(edge), offset);
    }
14458
    // Shared lowering for comparison nodes. Dispatches on the node's binary use
    // kind: integral and double cases are lowered inline via the supplied
    // functors; StringIdent compares via a side-effect-free call; String goes
    // through a full VM call; everything else (Untyped / HeapBigInt /
    // AnyBigInt) falls back to the generic JSValue compare.
    template<typename IntFunctor, typename DoubleFunctor>
    void compare(
        const IntFunctor& intFunctor, const DoubleFunctor& doubleFunctor,
        C_JITOperation_TT stringIdentFunction,
        C_JITOperation_B_GJssJss stringFunction,
        S_JITOperation_GJJ fallbackFunction)
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        if (m_node->isBinaryUseKind(Int32Use)) {
            LValue left = lowInt32(m_node->child1());
            LValue right = lowInt32(m_node->child2());
            setBoolean(intFunctor(left, right));
            return;
        }

        if (m_node->isBinaryUseKind(Int52RepUse)) {
            // Both operands must be in the same Int52 representation; child1
            // picks the kind and child2 is coerced to match.
            Int52Kind kind;
            LValue left = lowWhicheverInt52(m_node->child1(), kind);
            LValue right = lowInt52(m_node->child2(), kind);
            setBoolean(intFunctor(left, right));
            return;
        }

        if (m_node->isBinaryUseKind(DoubleRepUse)) {
            LValue left = lowDouble(m_node->child1());
            LValue right = lowDouble(m_node->child2());
            setBoolean(doubleFunctor(left, right));
            return;
        }

#if USE(BIGINT32)
        if (m_node->isBinaryUseKind(BigInt32Use)) {
            // BigInt32s unbox to int32s, so the integer comparison applies.
            LValue left = lowBigInt32(m_node->child1());
            LValue right = lowBigInt32(m_node->child2());
            setBoolean(intFunctor(unboxBigInt32(left), unboxBigInt32(right)));
            return;
        }
#endif

        if (m_node->isBinaryUseKind(StringIdentUse)) {
            LValue left = lowStringIdent(m_node->child1());
            LValue right = lowStringIdent(m_node->child2());
            setBoolean(m_out.callWithoutSideEffects(Int32, stringIdentFunction, left, right));
            return;
        }

        if (m_node->isBinaryUseKind(StringUse)) {
            LValue left = lowCell(m_node->child1());
            LValue right = lowCell(m_node->child2());
            speculateString(m_node->child1(), left);
            speculateString(m_node->child2(), right);

            // String comparison may resolve ropes and allocate, so it needs a
            // full VM call.
            LValue result = vmCall(
                Int32, stringFunction,
                weakPointer(globalObject), left, right);
            setBoolean(result);
            return;
        }

        DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse) || m_node->isBinaryUseKind(HeapBigIntUse) || m_node->isBinaryUseKind(AnyBigIntUse), m_node->child1().useKind(), m_node->child2().useKind());
        genericJSValueCompare(intFunctor, fallbackFunction);
    }
14521
    // Lowers String.prototype.slice. Fast paths: an empty result returns the
    // shared empty string; a single-character result is fetched from the VM's
    // single-character string table (falling back to a VM call for characters
    // above maxSingleCharacterString). Multi-character slices call
    // operationStringSubstr; rope strings take operationStringSlice directly.
    void compileStringSlice()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        LBasicBlock lengthCheckCase = m_out.newBlock();
        LBasicBlock emptyCase = m_out.newBlock();
        LBasicBlock notEmptyCase = m_out.newBlock();
        LBasicBlock oneCharCase = m_out.newBlock();
        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock is16Bit = m_out.newBlock();
        LBasicBlock bitsContinuation = m_out.newBlock();
        LBasicBlock bigCharacter = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock ropeSlowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue string = lowString(m_node->child1());
        LValue start = lowInt32(m_node->child2());
        LValue end = nullptr;
        // A missing end argument means "to the end of the string"; INT32_MAX
        // gets clamped to the length by populateSliceRange below.
        if (m_node->child3())
            end = lowInt32(m_node->child3());
        else
            end = m_out.constInt32(std::numeric_limits<int32_t>::max());
        m_out.branch(isRopeString(string, m_node->child1()), rarely(ropeSlowCase), usually(lengthCheckCase));

        LBasicBlock lastNext = m_out.appendTo(lengthCheckCase, emptyCase);
        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
        LValue length = m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length);
        // Normalizes negative indices and clamps to [0, length].
        auto range = populateSliceRange(start, end, length);
        LValue from = range.first;
        LValue to = range.second;
        LValue span = m_out.sub(to, from);
        m_out.branch(m_out.lessThanOrEqual(span, m_out.int32Zero), unsure(emptyCase), unsure(notEmptyCase));

        Vector<ValueFromBlock, 5> results;

        m_out.appendTo(emptyCase, notEmptyCase);
        results.append(m_out.anchor(weakPointer(jsEmptyString(vm()))));
        m_out.jump(continuation);

        m_out.appendTo(notEmptyCase, oneCharCase);
        m_out.branch(m_out.equal(span, m_out.int32One), unsure(oneCharCase), unsure(slowCase));

        m_out.appendTo(oneCharCase, is8Bit);
        LValue storage = m_out.loadPtr(stringImpl, m_heaps.StringImpl_data);
        // Dispatch on the impl's is-8-bit flag to load the one character.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(is16Bit), unsure(is8Bit));

        m_out.appendTo(is8Bit, is16Bit);
        ValueFromBlock char8Bit = m_out.anchor(m_out.load8ZeroExt32(m_out.baseIndex(m_heaps.characters8, storage, m_out.zeroExtPtr(from))));
        m_out.jump(bitsContinuation);

        m_out.appendTo(is16Bit, bigCharacter);
        LValue char16BitValue = m_out.load16ZeroExt32(m_out.baseIndex(m_heaps.characters16, storage, m_out.zeroExtPtr(from)));
        ValueFromBlock char16Bit = m_out.anchor(char16BitValue);
        // Characters beyond the single-character-string table need a VM call.
        m_out.branch(
            m_out.above(char16BitValue, m_out.constInt32(maxSingleCharacterString)),
            rarely(bigCharacter), usually(bitsContinuation));

        m_out.appendTo(bigCharacter, bitsContinuation);
        results.append(m_out.anchor(vmCall(
            Int64, operationSingleCharacterString,
            m_vmValue, char16BitValue)));
        m_out.jump(continuation);

        m_out.appendTo(bitsContinuation, slowCase);
        LValue character = m_out.phi(Int32, char8Bit, char16Bit);
        LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());
        results.append(m_out.anchor(m_out.loadPtr(m_out.baseIndex(
            m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(character)))));
        m_out.jump(continuation);

        m_out.appendTo(slowCase, ropeSlowCase);
        results.append(m_out.anchor(vmCall(pointerType(), operationStringSubstr, weakPointer(globalObject), string, from, span)));
        m_out.jump(continuation);

        // Ropes: pass the raw start/end through since the range was never
        // normalized on this path.
        m_out.appendTo(ropeSlowCase, continuation);
        results.append(m_out.anchor(vmCall(pointerType(), operationStringSlice, weakPointer(globalObject), string, start, end)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), results));
    }
14607
    // Lowers String.prototype.toLowerCase. The fast path scans an 8-bit,
    // non-rope string for any non-ASCII or uppercase character; if none is
    // found the original string is returned unchanged. On the first bad
    // character (or for ropes / 16-bit strings) we call operationToLowerCase,
    // passing the index reached so the slow path can resume scanning there.
    void compileToLowerCase()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        LBasicBlock notRope = m_out.newBlock();
        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock loopTop = m_out.newBlock();
        LBasicBlock loopBody = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue string = lowString(m_node->child1());
        // Two anchors of index 0: one feeds the scan loop's phi, the other
        // feeds the slow path's index phi for the rope/16-bit entry.
        ValueFromBlock startIndex = m_out.anchor(m_out.constInt32(0));
        ValueFromBlock startIndexForCall = m_out.anchor(m_out.constInt32(0));
        m_out.branch(isRopeString(string, m_node->child1()),
            unsure(slowPath), unsure(notRope));

        LBasicBlock lastNext = m_out.appendTo(notRope, is8Bit);
        LValue impl = m_out.loadPtr(string, m_heaps.JSString_value);
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(impl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(slowPath), unsure(is8Bit));

        m_out.appendTo(is8Bit, loopTop);
        LValue length = m_out.load32(impl, m_heaps.StringImpl_length);
        LValue buffer = m_out.loadPtr(impl, m_heaps.StringImpl_data);
        ValueFromBlock fastResult = m_out.anchor(string);
        m_out.jump(loopTop);

        m_out.appendTo(loopTop, loopBody);
        LValue index = m_out.phi(Int32, startIndex);
        ValueFromBlock indexFromBlock = m_out.anchor(index);
        m_out.branch(m_out.below(index, length),
            unsure(loopBody), unsure(continuation));

        m_out.appendTo(loopBody, slowPath);

        // FIXME: Strings needs to be caged.
        // https://bugs.webkit.org/show_bug.cgi?id=174924
        LValue byte = m_out.load8ZeroExt32(m_out.baseIndex(m_heaps.characters8, buffer, m_out.zeroExtPtr(index)));
        // Bad characters are anything outside ASCII or in ['A', 'Z'].
        LValue isInvalidAsciiRange = m_out.bitAnd(byte, m_out.constInt32(~0x7F));
        LValue isUpperCase = m_out.belowOrEqual(m_out.sub(byte, m_out.constInt32('A')), m_out.constInt32('Z' - 'A'));
        LValue isBadCharacter = m_out.bitOr(isInvalidAsciiRange, isUpperCase);
        m_out.addIncomingToPhi(index, m_out.anchor(m_out.add(index, m_out.int32One)));
        m_out.branch(isBadCharacter, unsure(slowPath), unsure(loopTop));

        m_out.appendTo(slowPath, continuation);
        LValue slowPathIndex = m_out.phi(Int32, startIndexForCall, indexFromBlock);
        ValueFromBlock slowResult = m_out.anchor(vmCall(pointerType(), operationToLowerCase, weakPointer(globalObject), string, slowPathIndex));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
14663
14664 void compileNumberToStringWithRadix()
14665 {
14666 bool validRadixIsGuaranteed = false;
14667 if (m_node->child2()->isInt32Constant()) {
14668 int32_t radix = m_node->child2()->asInt32();
14669 if (radix >= 2 && radix <= 36)
14670 validRadixIsGuaranteed = true;
14671 }
14672
14673 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
14674 switch (m_node->child1().useKind()) {
14675 case Int32Use:
14676 setJSValue(vmCall(pointerType(), validRadixIsGuaranteed ? operationInt32ToStringWithValidRadix : operationInt32ToString, weakPointer(globalObject), lowInt32(m_node->child1()), lowInt32(m_node->child2())));
14677 break;
14678 case Int52RepUse:
14679 setJSValue(vmCall(pointerType(), validRadixIsGuaranteed ? operationInt52ToStringWithValidRadix : operationInt52ToString, weakPointer(globalObject), lowStrictInt52(m_node->child1()), lowInt32(m_node->child2())));
14680 break;
14681 case DoubleRepUse:
14682 setJSValue(vmCall(pointerType(), validRadixIsGuaranteed ? operationDoubleToStringWithValidRadix : operationDoubleToString, weakPointer(globalObject), lowDouble(m_node->child1()), lowInt32(m_node->child2())));
14683 break;
14684 default:
14685 RELEASE_ASSERT_NOT_REACHED();
14686 }
14687 }
14688
14689 void compileNumberToStringWithValidRadixConstant()
14690 {
14691 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
14692 switch (m_node->child1().useKind()) {
14693 case Int32Use:
14694 setJSValue(vmCall(pointerType(), operationInt32ToStringWithValidRadix, weakPointer(globalObject), lowInt32(m_node->child1()), m_out.constInt32(m_node->validRadixConstant())));
14695 break;
14696 case Int52RepUse:
14697 setJSValue(vmCall(pointerType(), operationInt52ToStringWithValidRadix, weakPointer(globalObject), lowStrictInt52(m_node->child1()), m_out.constInt32(m_node->validRadixConstant())));
14698 break;
14699 case DoubleRepUse:
14700 setJSValue(vmCall(pointerType(), operationDoubleToStringWithValidRadix, weakPointer(globalObject), lowDouble(m_node->child1()), m_out.constInt32(m_node->validRadixConstant())));
14701 break;
14702 default:
14703 RELEASE_ASSERT_NOT_REACHED();
14704 }
14705 }
14706
14707 void compileResolveScopeForHoistingFuncDeclInEval()
14708 {
14709 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
14710 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
14711 setJSValue(vmCall(pointerType(), operationResolveScopeForHoistingFuncDeclInEval, weakPointer(globalObject), lowCell(m_node->child1()), m_out.constIntPtr(uid)));
14712 }
14713
14714 void compileResolveScope()
14715 {
14716 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
14717 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
14718 setJSValue(vmCall(pointerType(), operationResolveScope,
14719 weakPointer(globalObject), lowCell(m_node->child1()), m_out.constIntPtr(uid)));
14720 }
14721
14722 void compileGetDynamicVar()
14723 {
14724 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
14725 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
14726 setJSValue(vmCall(Int64, operationGetDynamicVar,
14727 weakPointer(globalObject), lowCell(m_node->child1()), m_out.constIntPtr(uid), m_out.constInt32(m_node->getPutInfo())));
14728 }
14729
14730 void compilePutDynamicVar()
14731 {
14732 JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
14733 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
14734 setJSValue(vmCall(Void, m_node->ecmaMode().isStrict() ? operationPutDynamicVarStrict : operationPutDynamicVarNonStrict,
14735 weakPointer(globalObject), lowCell(m_node->child1()), lowJSValue(m_node->child2()), m_out.constIntPtr(uid), m_out.constInt32(m_node->getPutInfo())));
14736 }
14737
14738 void compileUnreachable()
14739 {
14740 // It's so tempting to assert that AI has proved that this is unreachable. But that's
14741 // simply not a requirement of the Unreachable opcode at all. If you emit an opcode that
14742 // *you* know will not return, then it's fine to end the basic block with Unreachable
14743 // after that opcode. You don't have to also prove to AI that your opcode does not return.
14744 // Hence, there is nothing to do here but emit code that will crash, so that we catch
14745 // cases where you said Unreachable but you lied.
14746 //
14747 // It's also also worth noting that some clients emit this opcode because they're not 100% sure
14748 // if the code is unreachable, but they would really prefer if we crashed rather than kept going
14749 // if it did turn out to be reachable. Hence, this needs to deterministically crash.
14750
14751 crash();
14752 }
14753
14754 void compileCheckJSCast()
14755 {
14756 DFG_ASSERT(m_graph, m_node, m_node->op() == CheckJSCast || m_node->op() == CheckNotJSCast);
14757 LValue cell = lowCell(m_node->child1());
14758
14759 const ClassInfo* classInfo = m_node->classInfo();
14760
14761 if (classInfo->inheritsJSTypeRange) {
14762 LValue hasType = isCellWithType(cell, classInfo->inheritsJSTypeRange.value(), speculationFromClassInfoInheritance(classInfo));
14763 LValue condition = nullptr;
14764 if (m_node->op() == CheckJSCast)
14765 condition = m_out.bitNot(hasType);
14766 else
14767 condition = hasType;
14768 speculate(BadType, jsValueValue(cell), m_node->child1().node(), condition);
14769 return;
14770 }
14771
14772 if (!classInfo->checkSubClassSnippet) {
14773 LBasicBlock loop = m_out.newBlock();
14774 LBasicBlock parentClass = m_out.newBlock();
14775 LBasicBlock continuation = m_out.newBlock();
14776
14777 LValue structure = loadStructure(cell);
14778 LValue classInfo = m_out.loadPtr(structure, m_heaps.Structure_classInfo);
14779 ValueFromBlock otherAtStart = m_out.anchor(classInfo);
14780 m_out.jump(loop);
14781
14782 LBasicBlock lastNext = m_out.appendTo(loop, parentClass);
14783 LValue other = m_out.phi(pointerType(), otherAtStart);
14784 LValue foundCondition = m_out.equal(other, m_out.constIntPtr(classInfo));
14785 if (m_node->op() == CheckNotJSCast) {
14786 speculate(BadType, jsValueValue(cell), m_node->child1().node(), foundCondition);
14787 m_out.jump(parentClass);
14788 } else
14789 m_out.branch(foundCondition, unsure(continuation), unsure(parentClass));
14790
14791 m_out.appendTo(parentClass, continuation);
14792 LValue parent = m_out.loadPtr(other, m_heaps.ClassInfo_parentClass);
14793 LValue parentIsNull = m_out.isNull(parent);
14794 m_out.addIncomingToPhi(other, m_out.anchor(parent));
14795 if (m_node->op() == CheckNotJSCast)
14796 m_out.branch(parentIsNull, unsure(continuation), unsure(loop));
14797 else {
14798 speculate(BadType, jsValueValue(cell), m_node->child1().node(), parentIsNull);
14799 m_out.jump(loop);
14800 }
14801
14802 m_out.appendTo(continuation, lastNext);
14803 return;
14804 }
14805
14806 RefPtr<Snippet> domJIT = classInfo->checkSubClassSnippet();
14807 PatchpointValue* patchpoint = m_out.patchpoint(Void);
14808 patchpoint->appendSomeRegister(cell);
14809 patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
14810 patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));
14811
14812 NodeOrigin origin = m_origin;
14813 unsigned osrExitArgumentOffset = patchpoint->numChildren();
14814 OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(jsValueValue(cell), m_node->child1().node());
14815 patchpoint->appendColdAnys(buildExitArguments(exitDescriptor, origin.forExit, jsValueValue(cell)));
14816
14817 patchpoint->numGPScratchRegisters = domJIT->numGPScratchRegisters;
14818 patchpoint->numFPScratchRegisters = domJIT->numFPScratchRegisters;
14819 patchpoint->clobber(RegisterSet::macroScratchRegisters());
14820
14821 State* state = &m_ftlState;
14822 Node* node = m_node;
14823 NodeType op = m_node->op();
14824 CodeOrigin semanticNodeOrigin = node->origin.semantic;
14825 JSValue child1Constant = m_state.forNode(m_node->child1()).value();
14826
14827 auto nodeIndex = m_nodeIndexInGraph;
14828 patchpoint->setGenerator(
14829 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
14830 AllowMacroScratchRegisterUsage allowScratch(jit);
14831
14832 Vector<GPRReg> gpScratch;
14833 Vector<FPRReg> fpScratch;
14834 Vector<SnippetParams::Value> regs;
14835
14836 regs.append(SnippetParams::Value(params[0].gpr(), child1Constant));
14837
14838 for (unsigned i = 0; i < domJIT->numGPScratchRegisters; ++i)
14839 gpScratch.append(params.gpScratch(i));
14840
14841 for (unsigned i = 0; i < domJIT->numFPScratchRegisters; ++i)
14842 fpScratch.append(params.fpScratch(i));
14843
14844 RefPtr<OSRExitHandle> handle = exitDescriptor->emitOSRExitLater(*state, BadType, origin, params, nodeIndex, osrExitArgumentOffset);
14845
14846 SnippetParams domJITParams(*state, params, semanticNodeOrigin, nullptr, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
14847 CCallHelpers::JumpList failureCases = domJIT->generator()->run(jit, domJITParams);
14848 CCallHelpers::JumpList notJSCastFailureCases;
14849 if (op == CheckNotJSCast) {
14850 notJSCastFailureCases.append(jit.jump());
14851 failureCases.link(&jit);
14852 }
14853
14854 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
14855 if (op == CheckJSCast)
14856 linkBuffer.link(failureCases, linkBuffer.locationOf<NoPtrTag>(handle->label));
14857 else
14858 linkBuffer.link(notJSCastFailureCases, linkBuffer.locationOf<NoPtrTag>(handle->label));
14859 });
14860 });
14861 patchpoint->effects = Effects::forCheck();
14862 }
14863
    void compileCallDOM()
    {
        // Lowers CallDOM: a direct call to a type-checked DOM function. The
        // first child is the |this| cell; the remaining children are arguments
        // whose expected lowered forms come from signature->arguments.
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        const DOMJIT::Signature* signature = m_node->signature();

        // FIXME: We should have a way to call functions with the vector of registers.
        // https://bugs.webkit.org/show_bug.cgi?id=163099
        Vector<LValue, JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS> operands;

        // Lower the children in DFG child order; index 0 is |this|, the rest
        // are dispatched on the signature's per-argument speculated type.
        unsigned index = 0;
        DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, [&](Node*, Edge edge) {
            if (!index)
                operands.append(lowCell(edge));
            else {
                switch (signature->arguments[index - 1]) {
                case SpecString:
                    operands.append(lowString(edge));
                    break;
                case SpecInt32Only:
                    operands.append(lowInt32(edge));
                    break;
                case SpecBoolean:
                    operands.append(lowBoolean(edge));
                    break;
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                    break;
                }
            }
            ++index;
        });

        unsigned argumentCountIncludingThis = signature->argumentCount + 1;
        LValue result;
        // FIXME: Revisit JSGlobalObject.
        // https://bugs.webkit.org/show_bug.cgi?id=203204
        auto function = CFunctionPtr(signature->functionWithoutTypeCheck);
        // Pick the call shim matching the arity; the casts only re-type the
        // same function pointer for vmCall's benefit.
        switch (argumentCountIncludingThis) {
        case 1:
            result = vmCall(Int64, reinterpret_cast<J_JITOperation_GP>(function.get()), weakPointer(globalObject), operands[0]);
            break;
        case 2:
            result = vmCall(Int64, reinterpret_cast<J_JITOperation_GPP>(function.get()), weakPointer(globalObject), operands[0], operands[1]);
            break;
        case 3:
            result = vmCall(Int64, reinterpret_cast<J_JITOperation_GPPP>(function.get()), weakPointer(globalObject), operands[0], operands[1], operands[2]);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }

        setJSValue(result);
    }
14918
    void compileCallDOMGetter()
    {
        // Lowers CallDOMGetter. If the DOMJIT supplied a snippet, run it in a
        // patchpoint; otherwise fall back to calling the custom accessor
        // getter directly.
        DOMJIT::CallDOMGetterSnippet* domJIT = m_node->callDOMGetterData()->snippet;
        if (!domJIT) {
            // The following function is not an operation: we directly call a custom accessor getter.
            // Since the getter does not have code setting topCallFrame, As is the same to IC, we should set topCallFrame in caller side.
            // FIXME: Revisit JSGlobalObject.
            // https://bugs.webkit.org/show_bug.cgi?id=203204
            JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
            m_out.storePtr(m_callFrame, m_out.absolute(&vm().topCallFrame));
            if (Options::useJITCage()) {
                // Under the JIT cage, route through the vm entry thunk, passing
                // the real getter as an extra argument.
                setJSValue(
                    vmCall(Int64, vmEntryCustomGetter, weakPointer(globalObject), lowCell(m_node->child1()), m_out.constIntPtr(m_graph.identifiers()[m_node->callDOMGetterData()->identifierNumber]), m_out.constIntPtr(m_node->callDOMGetterData()->customAccessorGetter.executableAddress())));
            } else {
                // Without the cage, retag the getter's code pointer from
                // CustomAccessorPtrTag to OperationPtrTag so vmCall can use it.
                FunctionPtr<CustomAccessorPtrTag> getter = m_node->callDOMGetterData()->customAccessorGetter;
                FunctionPtr<OperationPtrTag> bypassedFunction = FunctionPtr<OperationPtrTag>(MacroAssemblerCodePtr<OperationPtrTag>(WTF::tagNativeCodePtrImpl<OperationPtrTag>(WTF::untagNativeCodePtrImpl<CustomAccessorPtrTag>(getter.executableAddress()))));
                setJSValue(vmCall(Int64, bypassedFunction, weakPointer(globalObject), lowCell(m_node->child1()), m_out.constIntPtr(m_graph.identifiers()[m_node->callDOMGetterData()->identifierNumber])));
            }
            return;
        }

        Edge& baseEdge = m_node->child1();
        LValue base = lowCell(baseEdge);
        JSValue baseConstant = m_state.forNode(baseEdge).value();

        LValue globalObject = nullptr;
        JSValue globalObjectConstant;
        if (domJIT->requireGlobalObject) {
            Edge& globalObjectEdge = m_node->child2();
            globalObject = lowCell(globalObjectEdge);
            globalObjectConstant = m_state.forNode(globalObjectEdge).value();
        }

        // Snippet path: the patchpoint's result register is the JSValue; the
        // snippet may throw, hence the exception handle.
        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        if (domJIT->requireGlobalObject)
            patchpoint->appendSomeRegister(globalObject);
        patchpoint->append(m_notCellMask, ValueRep::reg(GPRInfo::notCellMaskRegister));
        patchpoint->append(m_numberTag, ValueRep::reg(GPRInfo::numberTagRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->numGPScratchRegisters = domJIT->numGPScratchRegisters;
        patchpoint->numFPScratchRegisters = domJIT->numFPScratchRegisters;
        patchpoint->resultConstraints = { ValueRep::SomeEarlyRegister };

        // Captured by value: |this| may be gone when B3 runs the generator.
        State* state = &m_ftlState;
        Node* node = m_node;
        CodeOrigin semanticNodeOrigin = node->origin.semantic;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Vector<GPRReg> gpScratch;
                Vector<FPRReg> fpScratch;
                Vector<SnippetParams::Value> regs;

                // params[0] is the result; params[1] the base cell; params[2]
                // (when present) the global object.
                regs.append(JSValueRegs(params[0].gpr()));
                regs.append(SnippetParams::Value(params[1].gpr(), baseConstant));
                if (domJIT->requireGlobalObject)
                    regs.append(SnippetParams::Value(params[2].gpr(), globalObjectConstant));

                for (unsigned i = 0; i < domJIT->numGPScratchRegisters; ++i)
                    gpScratch.append(params.gpScratch(i));

                for (unsigned i = 0; i < domJIT->numFPScratchRegisters; ++i)
                    fpScratch.append(params.fpScratch(i));

                Box<CCallHelpers::JumpList> exceptions = exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                SnippetParams domJITParams(*state, params, semanticNodeOrigin, exceptions, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
                domJIT->generator()->run(jit, domJITParams);
            });
        patchpoint->effects = Effects::forCall();
        setJSValue(patchpoint);
    }
14994
    void compileFilterICStatus()
    {
        // FilterICStatus emits no code; it only narrows the recorded inline
        // cache status for this node in the abstract interpreter's state.
        m_interpreter.filterICStatus(m_node);
    }
14999
15000 LValue byteSwap32(LValue value)
15001 {
15002 // FIXME: teach B3 byteswap
15003 // https://bugs.webkit.org/show_bug.cgi?id=188759
15004
15005 RELEASE_ASSERT(value->type() == Int32);
15006 PatchpointValue* patchpoint = m_out.patchpoint(Int32);
15007 patchpoint->appendSomeRegister(value);
15008 patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
15009 jit.move(params[1].gpr(), params[0].gpr());
15010 jit.byteSwap32(params[0].gpr());
15011 });
15012 patchpoint->effects = Effects::none();
15013 return patchpoint;
15014 }
15015
15016 LValue byteSwap64(LValue value)
15017 {
15018 // FIXME: teach B3 byteswap
15019 // https://bugs.webkit.org/show_bug.cgi?id=188759
15020
15021 RELEASE_ASSERT(value->type() == Int64);
15022 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
15023 patchpoint->appendSomeRegister(value);
15024 patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
15025 jit.move(params[1].gpr(), params[0].gpr());
15026 jit.byteSwap64(params[0].gpr());
15027 });
15028 patchpoint->effects = Effects::none();
15029 return patchpoint;
15030 }
15031
    // Branches at runtime on |isLittleEndian| (a boolean; bit 0 is tested) and
    // runs one of the two emitters on each side. Both emitters must either
    // return values of the same LType (the results are joined with a phi, which
    // is returned) or both return nullptr (then nullptr is returned).
    template <typename F1, typename F2>
    LValue emitCodeBasedOnEndiannessBranch(LValue isLittleEndian, const F1& emitLittleEndianCode, const F2& emitBigEndianCode)
    {
        LType type;

        LBasicBlock bigEndianCase = m_out.newBlock();
        LBasicBlock littleEndianCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Bit 0 clear means big-endian.
        m_out.branch(m_out.testIsZero32(isLittleEndian, m_out.constInt32(1)),
            unsure(bigEndianCase), unsure(littleEndianCase));

        LBasicBlock lastNext = m_out.appendTo(bigEndianCase, littleEndianCase);
        LValue bigEndianValue = emitBigEndianCode();
        // The big-endian side fixes the result type both sides must agree on.
        type = bigEndianValue ? bigEndianValue->type() : Void;
        ValueFromBlock bigEndianResult = bigEndianValue ? m_out.anchor(bigEndianValue) : ValueFromBlock();
        m_out.jump(continuation);

        m_out.appendTo(littleEndianCase, continuation);
        LValue littleEndianValue = emitLittleEndianCode();
        ValueFromBlock littleEndianResult = littleEndianValue ? m_out.anchor(littleEndianValue) : ValueFromBlock();
        RELEASE_ASSERT((!littleEndianValue && !bigEndianValue) || type == littleEndianValue->type());
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        // Emitters must agree: both produced a value, or neither did.
        RELEASE_ASSERT(!!bigEndianResult == !!littleEndianResult);
        if (bigEndianResult)
            return m_out.phi(type, bigEndianResult, littleEndianResult);
        return nullptr;
    }
15062
    void compileDataViewGet()
    {
        // Lowers DataViewGetInt/DataViewGetFloat. child1 is the DataView,
        // child2 the byte index, child3 (optional) the runtime littleEndian
        // flag; when absent, data.isLittleEndian is a compile-time TriState.
        LValue dataView = lowDataViewObject(m_node->child1());
        LValue index = lowInt32(m_node->child2());
        LValue isLittleEndian = nullptr;
        if (m_node->child3())
            isLittleEndian = lowBoolean(m_node->child3());

        DataViewData data = m_node->dataViewData();

        // Bounds check: widen to 64-bit and check that the *last* byte of the
        // access (index + byteSize - 1) is within the view's length.
        LValue length = m_out.zeroExtPtr(m_out.load32NonNegative(dataView, m_heaps.JSArrayBufferView_length));
        LValue indexToCheck = m_out.zeroExtPtr(index);
        if (data.byteSize > 1)
            indexToCheck = m_out.add(indexToCheck, m_out.constInt64(data.byteSize - 1));
        speculate(OutOfBounds, noValue(), nullptr, m_out.aboveOrEqual(indexToCheck, length));

        LValue vector = caged(Gigacage::Primitive, m_out.loadPtr(dataView, m_heaps.JSArrayBufferView_vector), dataView);

        TypedPointer pointer(m_heaps.typedArrayProperties, m_out.add(vector, m_out.zeroExtPtr(index)));

        if (m_node->op() == DataViewGetInt) {
            switch (data.byteSize) {
            case 1:
                // Single bytes have no endianness.
                if (data.isSigned)
                    setInt32(m_out.load8SignExt32(pointer));
                else
                    setInt32(m_out.load8ZeroExt32(pointer));
                break;
            case 2: {
                auto emitLittleEndianLoad = [&] {
                    if (data.isSigned)
                        return m_out.load16SignExt32(pointer);
                    return m_out.load16ZeroExt32(pointer);
                };

                auto emitBigEndianLoad = [&] {
                    // Load, byteswap the low 16 bits in a patchpoint, then
                    // sign-extend if needed.
                    LValue val = m_out.load16ZeroExt32(pointer);

                    PatchpointValue* patchpoint = m_out.patchpoint(Int32);
                    patchpoint->appendSomeRegister(val);
                    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                        jit.move(params[1].gpr(), params[0].gpr());
                        jit.byteSwap16(params[0].gpr());
                        if (data.isSigned)
                            jit.signExtend16To32(params[0].gpr(), params[0].gpr());
                    });
                    patchpoint->effects = Effects::none();

                    return patchpoint;
                };

                if (data.isLittleEndian == TriState::False)
                    setInt32(emitBigEndianLoad());
                else if (data.isLittleEndian == TriState::True)
                    setInt32(emitLittleEndianLoad());
                else
                    setInt32(emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianLoad, emitBigEndianLoad));

                break;
            }
            case 4: {
                LValue loadedValue = m_out.load32(pointer);

                if (data.isLittleEndian == TriState::False)
                    loadedValue = byteSwap32(loadedValue);
                else if (data.isLittleEndian == TriState::Indeterminate) {
                    auto emitLittleEndianCode = [&] {
                        return loadedValue;
                    };
                    auto emitBigEndianCode = [&] {
                        return byteSwap32(loadedValue);
                    };

                    loadedValue = emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);
                }

                // An unsigned 32-bit result may not fit in int32, so it is
                // produced as a strict Int52 instead.
                if (data.isSigned)
                    setInt32(loadedValue);
                else
                    setStrictInt52(m_out.zeroExt(loadedValue, Int64));

                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        } else {
            // DataViewGetFloat: Float32 or Float64 reads, result is a double.
            switch (data.byteSize) {
            case 4: {
                auto emitLittleEndianCode = [&] {
                    return m_out.floatToDouble(m_out.loadFloat(pointer));
                };

                auto emitBigEndianCode = [&] {
                    // Byteswap in a GPR, then move to an FPR and widen.
                    LValue loadedValue = m_out.load32(pointer);
                    PatchpointValue* patchpoint = m_out.patchpoint(Double);
                    patchpoint->appendSomeRegister(loadedValue);
                    patchpoint->numGPScratchRegisters = 1;
                    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                        jit.move(params[1].gpr(), params.gpScratch(0));
                        jit.byteSwap32(params.gpScratch(0));
                        jit.move32ToFloat(params.gpScratch(0), params[0].fpr());
                        jit.convertFloatToDouble(params[0].fpr(), params[0].fpr());
                    });
                    patchpoint->effects = Effects::none();
                    return patchpoint;
                };

                if (data.isLittleEndian == TriState::True)
                    setDouble(emitLittleEndianCode());
                else if (data.isLittleEndian == TriState::False)
                    setDouble(emitBigEndianCode());
                else
                    setDouble(emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode));

                break;
            }
            case 8: {
                auto emitLittleEndianCode = [&] {
                    return m_out.loadDouble(pointer);
                };

                auto emitBigEndianCode = [&] {
                    LValue loadedValue = m_out.load64(pointer);
                    loadedValue = byteSwap64(loadedValue);
                    return m_out.bitCast(loadedValue, Double);
                };

                if (data.isLittleEndian == TriState::True)
                    setDouble(emitLittleEndianCode());
                else if (data.isLittleEndian == TriState::False)
                    setDouble(emitBigEndianCode());
                else
                    setDouble(emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode));

                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }
    }
15205
    void compileDataViewSet()
    {
        // Lowers DataViewSet. Var-arg children: 0 = DataView, 1 = byte index,
        // 2 = value, 3 (optional) = runtime littleEndian flag; when absent,
        // data.isLittleEndian is a compile-time TriState.
        LValue dataView = lowDataViewObject(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
        LValue isLittleEndian = nullptr;
        if (m_graph.varArgChild(m_node, 3))
            isLittleEndian = lowBoolean(m_graph.varArgChild(m_node, 3));

        DataViewData data = m_node->dataViewData();

        // Bounds check: widen to 64-bit and check that the *last* byte of the
        // access (index + byteSize - 1) is within the view's length.
        LValue length = m_out.zeroExtPtr(m_out.load32NonNegative(dataView, m_heaps.JSArrayBufferView_length));
        LValue indexToCheck = m_out.zeroExtPtr(index);
        if (data.byteSize > 1)
            indexToCheck = m_out.add(indexToCheck, m_out.constInt64(data.byteSize - 1));
        speculate(OutOfBounds, noValue(), nullptr, m_out.aboveOrEqual(indexToCheck, length));

        Edge& valueEdge = m_graph.varArgChild(m_node, 2);
        LValue valueToStore;
        switch (valueEdge.useKind()) {
        case Int32Use:
            valueToStore = lowInt32(valueEdge);
            break;
        case DoubleRepUse:
            valueToStore = lowDouble(valueEdge);
            break;
        case Int52RepUse:
            valueToStore = lowStrictInt52(valueEdge);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        LValue vector = caged(Gigacage::Primitive, m_out.loadPtr(dataView, m_heaps.JSArrayBufferView_vector), dataView);
        TypedPointer pointer(m_heaps.typedArrayProperties, m_out.add(vector, m_out.zeroExtPtr(index)));

        if (data.isFloatingPoint) {
            if (data.byteSize == 4) {
                // Float32 store: narrow the double first.
                valueToStore = m_out.doubleToFloat(valueToStore);

                auto emitLittleEndianCode = [&] () -> LValue {
                    m_out.storeFloat(valueToStore, pointer);
                    return nullptr;
                };

                auto emitBigEndianCode = [&] () -> LValue {
                    // Move the float bits to a GPR, byteswap, store as int32.
                    PatchpointValue* patchpoint = m_out.patchpoint(Int32);
                    patchpoint->appendSomeRegister(valueToStore);
                    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                        jit.moveFloatTo32(params[1].fpr(), params[0].gpr());
                        jit.byteSwap32(params[0].gpr());
                    });
                    patchpoint->effects = Effects::none();
                    m_out.store32(patchpoint, pointer);
                    return nullptr;
                };

                if (data.isLittleEndian == TriState::False)
                    emitBigEndianCode();
                else if (data.isLittleEndian == TriState::True)
                    emitLittleEndianCode();
                else
                    emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);

            } else {
                RELEASE_ASSERT(data.byteSize == 8);
                auto emitLittleEndianCode = [&] () -> LValue {
                    m_out.storeDouble(valueToStore, pointer);
                    return nullptr;
                };
                auto emitBigEndianCode = [&] () -> LValue {
                    m_out.store64(byteSwap64(m_out.bitCast(valueToStore, Int64)), pointer);
                    return nullptr;
                };

                if (data.isLittleEndian == TriState::False)
                    emitBigEndianCode();
                else if (data.isLittleEndian == TriState::True)
                    emitLittleEndianCode();
                else
                    emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);
            }
        } else {
            switch (data.byteSize) {
            case 1:
                // Single bytes have no endianness.
                RELEASE_ASSERT(valueEdge.useKind() == Int32Use);
                m_out.store32As8(valueToStore, pointer);
                break;
            case 2: {
                RELEASE_ASSERT(valueEdge.useKind() == Int32Use);

                auto emitLittleEndianCode = [&] () -> LValue {
                    m_out.store32As16(valueToStore, pointer);
                    return nullptr;
                };
                auto emitBigEndianCode = [&] () -> LValue {
                    // Byteswap the low 16 bits before the narrowing store.
                    PatchpointValue* patchpoint = m_out.patchpoint(Int32);
                    patchpoint->appendSomeRegister(valueToStore);
                    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                        jit.move(params[1].gpr(), params[0].gpr());
                        jit.byteSwap16(params[0].gpr());
                    });
                    patchpoint->effects = Effects::none();

                    m_out.store32As16(patchpoint, pointer);
                    return nullptr;
                };

                if (data.isLittleEndian == TriState::False)
                    emitBigEndianCode();
                else if (data.isLittleEndian == TriState::True)
                    emitLittleEndianCode();
                else
                    emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);
                break;
            }
            case 4: {
                RELEASE_ASSERT(valueEdge.useKind() == Int32Use || valueEdge.useKind() == Int52RepUse);

                // A 32-bit store of an Int52 value keeps only the low 32 bits.
                if (valueEdge.useKind() == Int52RepUse)
                    valueToStore = m_out.castToInt32(valueToStore);

                auto emitLittleEndianCode = [&] () -> LValue {
                    m_out.store32(valueToStore, pointer);
                    return nullptr;
                };
                auto emitBigEndianCode = [&] () -> LValue {
                    m_out.store32(byteSwap32(valueToStore), pointer);
                    return nullptr;
                };

                if (data.isLittleEndian == TriState::False)
                    emitBigEndianCode();
                else if (data.isLittleEndian == TriState::True)
                    emitLittleEndianCode();
                else
                    emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);

                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }
    }
15350
15351 void compileDateGet()
15352 {
15353 LValue base = lowDateObject(m_node->child1());
15354
15355 auto emitGetCodeWithCallback = [&] (const AbstractHeap& cachedDoubleOffset, const AbstractHeap& cachedDataOffset, auto* operation, auto callback) {
15356 LBasicBlock dataExistsCase = m_out.newBlock();
15357 LBasicBlock fastCase = m_out.newBlock();
15358 LBasicBlock slowCase = m_out.newBlock();
15359 LBasicBlock continuation = m_out.newBlock();
15360
15361 LValue data = m_out.loadPtr(base, m_heaps.DateInstance_data);
15362 m_out.branch(m_out.notZero64(data), unsure(dataExistsCase), unsure(slowCase));
15363
15364 LBasicBlock lastNext = m_out.appendTo(dataExistsCase, fastCase);
15365 LValue milliseconds = m_out.loadDouble(base, m_heaps.DateInstance_internalNumber);
15366 LValue cachedMilliseconds = m_out.loadDouble(data, cachedDoubleOffset);
15367 m_out.branch(m_out.doubleNotEqualOrUnordered(milliseconds, cachedMilliseconds), unsure(slowCase), unsure(fastCase));
15368
15369 m_out.appendTo(fastCase, slowCase);
15370 ValueFromBlock fastResult = m_out.anchor(boxInt32(callback(m_out.load32(data, cachedDataOffset))));
15371 m_out.jump(continuation);
15372
15373 m_out.appendTo(slowCase, continuation);
15374 ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, operation, m_vmValue, base));
15375 m_out.jump(continuation);
15376
15377 m_out.appendTo(continuation, lastNext);
15378 setJSValue(m_out.phi(Int64, fastResult, slowResult));
15379 };
15380
15381 auto emitGetCode = [&] (const AbstractHeap& cachedDoubleOffset, const AbstractHeap& cachedDataOffset, auto* operation) {
15382 emitGetCodeWithCallback(cachedDoubleOffset, cachedDataOffset, operation, [] (LValue value) { return value; });
15383 };
15384
15385 switch (m_node->intrinsic()) {
15386 case DatePrototypeGetTimeIntrinsic:
15387 setDouble(m_out.loadDouble(base, m_heaps.DateInstance_internalNumber));
15388 break;
15389
15390 case DatePrototypeGetMillisecondsIntrinsic:
15391 case DatePrototypeGetUTCMillisecondsIntrinsic: {
15392 LValue milliseconds = m_out.loadDouble(base, m_heaps.DateInstance_internalNumber);
15393 LValue msPerSecondConstant = m_out.constDouble(msPerSecond);
15394 LValue seconds = m_out.doubleFloor(m_out.doubleDiv(milliseconds, msPerSecondConstant));
15395 LValue result = m_out.doubleToInt(m_out.doubleSub(milliseconds, m_out.doubleMul(seconds, msPerSecondConstant)));
15396 setJSValue(m_out.select(m_out.doubleNotEqualOrUnordered(milliseconds, milliseconds), m_out.constInt64(JSValue::encode(jsNaN())), boxInt32(result)));
15397 break;
15398 }
15399
15400 case DatePrototypeGetFullYearIntrinsic:
15401 emitGetCode(m_heaps.DateInstanceData_gregorianDateTimeCachedForMS, m_heaps.DateInstanceData_cachedGregorianDateTime_year, operationDateGetFullYear);
15402 break;
15403 case DatePrototypeGetUTCFullYearIntrinsic:
15404 emitGetCode(m_heaps.DateInstanceData_gregorianDateTimeUTCCachedForMS, m_heaps.DateInstanceData_cachedGregorianDateTimeUTC_year, operationDateGetUTCFullYear);
15405 break;
15406 case DatePrototypeGetMonthIntrinsic:
15407 emitGetCode(m_heaps.DateInstanceData_gregorianDateTimeCachedForMS, m_heaps.DateInstanceData_cachedGregorianDateTime_month, operationDateGetMonth);
15408 break;
15409 case DatePrototypeGetUTCMonthIntrinsic:
15410 emitGetCode(m_heaps.DateInstanceData_gregorianDateTimeUTCCachedForMS, m_heaps.DateInstanceData_cachedGregorianDateTimeUTC_month, operationDateGetUTCMonth);
15411 break;
15412 case DatePrototypeGetDateIntrinsic:
15413 emitGetCode(m_heaps.DateInstanceData_gregorianDateTimeCachedForMS, m_heaps.DateInstanceData_cachedGregorianDateTime_monthDay, operationDateGetDate);
15414 break;
15415 case DatePrototypeGetUTCDateIntrinsic:
15416 emitGetCode(m_heaps.DateInstanceData_gregorianDateTimeUTCCachedForMS, m_heaps.DateInstanceData_cachedGregorianDateTimeUTC_monthDay, operationDateGetUTCDate);
15417 break;
15418 case DatePrototypeGetDayIntrinsic:
15419 emitGetCode(m_heaps.DateInstanceData_gregorianDateTimeCachedForMS, m_heaps.DateInstanceData_cachedGregorianDateTime_weekDay, operationDateGetDay);
15420 break;
15421 case DatePrototypeGetUTCDayIntrinsic:
15422 emitGetCode(m_heaps.DateInstanceData_gregorianDateTimeUTCCachedForMS, m_heaps.DateInstanceData_cachedGregorianDateTimeUTC_weekDay, operationDateGetUTCDay);
15423 break;
15424 case DatePrototypeGetHoursIntrinsic:
15425 emitGetCode(m_heaps.DateInstanceData_gregorianDateTimeCachedForMS, m_heaps.DateInstanceData_cachedGregorianDateTime_hour, operationDateGetHours);
15426 break;
15427 case DatePrototypeGetUTCHoursIntrinsic:
15428 emitGetCode(m_heaps.DateInstanceData_gregorianDateTimeUTCCachedForMS, m_heaps.DateInstanceData_cachedGregorianDateTimeUTC_hour, operationDateGetUTCHours);
15429 break;
15430 case DatePrototypeGetMinutesIntrinsic:
15431 emitGetCode(m_heaps.DateInstanceData_gregorianDateTimeCachedForMS, m_heaps.DateInstanceData_cachedGregorianDateTime_minute, operationDateGetMinutes);
15432 break;
15433 case DatePrototypeGetUTCMinutesIntrinsic:
15434 emitGetCode(m_heaps.DateInstanceData_gregorianDateTimeUTCCachedForMS, m_heaps.DateInstanceData_cachedGregorianDateTimeUTC_minute, operationDateGetUTCMinutes);
15435 break;
15436 case DatePrototypeGetSecondsIntrinsic:
15437 emitGetCode(m_heaps.DateInstanceData_gregorianDateTimeCachedForMS, m_heaps.DateInstanceData_cachedGregorianDateTime_second, operationDateGetSeconds);
15438 break;
15439 case DatePrototypeGetUTCSecondsIntrinsic:
15440 emitGetCode(m_heaps.DateInstanceData_gregorianDateTimeUTCCachedForMS, m_heaps.DateInstanceData_cachedGregorianDateTimeUTC_second, operationDateGetUTCSeconds);
15441 break;
15442
15443 case DatePrototypeGetTimezoneOffsetIntrinsic:
15444 emitGetCodeWithCallback(m_heaps.DateInstanceData_gregorianDateTimeCachedForMS, m_heaps.DateInstanceData_cachedGregorianDateTime_utcOffsetInMinute, operationDateGetTimezoneOffset, [&] (LValue offset) {
15445 return m_out.neg(offset);
15446 });
15447 break;
15448
15449 case DatePrototypeGetYearIntrinsic:
15450 emitGetCodeWithCallback(m_heaps.DateInstanceData_gregorianDateTimeCachedForMS, m_heaps.DateInstanceData_cachedGregorianDateTime_year, operationDateGetYear, [&] (LValue year) {
15451 return m_out.sub(year, m_out.constInt32(1900));
15452 });
15453 break;
15454
15455 default:
15456 RELEASE_ASSERT_NOT_REACHED();
15457 }
15458 }
15459
// Emits a fuzzing-only early-return check at a DFG LoopHint. Normally a
// no-op; only active when Options::returnEarlyFromInfiniteLoopsForFuzzing()
// is set. When active, each pass through the loop hint bumps a VM-owned
// counter and, once the counter reaches
// Options::earlyReturnFromInfiniteLoopsLimit(), returns early from the
// compiled function with the global object as the return value.
void compileLoopHint()
{
    if (LIKELY(!Options::returnEarlyFromInfiniteLoopsForFuzzing()))
        return;

    // Only emit the early return if every code block on the inline stack has
    // opted its loop hints into fuzzing early returns.
    bool emitEarlyReturn = true;
    m_origin.semantic.walkUpInlineStack([&](CodeOrigin origin) {
        CodeBlock* baselineCodeBlock = m_graph.baselineCodeBlockFor(origin);
        if (!baselineCodeBlock->loopHintsAreEligibleForFuzzingEarlyReturn())
            emitEarlyReturn = false;
    });
    if (!emitEarlyReturn)
        return;

    CodeBlock* baselineCodeBlock = m_graph.baselineCodeBlockFor(m_origin.semantic);
    BytecodeIndex bytecodeIndex = m_origin.semantic.bytecodeIndex();
    const Instruction* instruction = baselineCodeBlock->instructions().at(bytecodeIndex.offset()).ptr();
    VM* vm = &this->vm();
    // Execution counter kept by the VM, keyed on the baseline loop-hint instruction.
    uintptr_t* ptr = vm->getLoopHintExecutionCounter(instruction);

    PatchpointValue* patchpoint = m_out.patchpoint(Void);
    patchpoint->effects = Effects::none();
    patchpoint->effects.exitsSideways = true; // The patchpoint may return from the function.
    patchpoint->effects.writesLocalState = true;
    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
        // regT0-regT2 are used as scratch without reserving them through B3,
        // so they are explicitly saved and restored around the sequence.
        auto restore = [&] {
            jit.popToRestore(GPRInfo::regT2);
            jit.popToRestore(GPRInfo::regT1);
            jit.popToRestore(GPRInfo::regT0);
        };

        jit.pushToSave(GPRInfo::regT0);
        jit.pushToSave(GPRInfo::regT1);
        jit.pushToSave(GPRInfo::regT2);

        // regT2 = limit, regT0 = &counter, regT1 = counter.
        jit.move(CCallHelpers::TrustedImm64(Options::earlyReturnFromInfiniteLoopsLimit()), GPRInfo::regT2);
        jit.move(CCallHelpers::TrustedImmPtr(ptr), GPRInfo::regT0);
        jit.load64(CCallHelpers::Address(GPRInfo::regT0), GPRInfo::regT1);
        auto skipEarlyReturn = jit.branch64(CCallHelpers::Below, GPRInfo::regT1, GPRInfo::regT2);

        if constexpr (validateDFGDoesGC) {
            if (Options::validateDoesGC()) {
                // We need to mock what a Return does: claims to GC.
                jit.move(CCallHelpers::TrustedImmPtr(vm->heap.addressOfDoesGC()), GPRInfo::regT0);
                jit.move(CCallHelpers::TrustedImm32(DoesGCCheck::encode(true, DoesGCCheck::Special::Uninitialized)), GPRInfo::regT1);
                jit.store32(GPRInfo::regT1, CCallHelpers::Address(GPRInfo::regT0));
            }
        }
        restore();
        // Early-return path: produce the global object as the result and run
        // the function epilogue.
        jit.moveValue(baselineCodeBlock->globalObject(), JSValueRegs { GPRInfo::returnValueGPR });
        params.code().emitEpilogue(jit);

        skipEarlyReturn.link(&jit);
        // Below the limit: increment the counter and continue the loop.
        jit.add64(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
        jit.store64(GPRInfo::regT1, CCallHelpers::Address(GPRInfo::regT0));
        restore();
    });
}
15518
15519 void emitSwitchForMultiByOffset(LValue base, bool structuresChecked, Vector<SwitchCase, 2>& cases, LBasicBlock exit)
15520 {
15521 if (cases.isEmpty()) {
15522 m_out.jump(exit);
15523 return;
15524 }
15525
15526 if (structuresChecked) {
15527 std::sort(
15528 cases.begin(), cases.end(),
15529 [&] (const SwitchCase& a, const SwitchCase& b) -> bool {
15530 return a.value()->asInt() < b.value()->asInt();
15531 });
15532 SwitchCase last = cases.takeLast();
15533 m_out.switchInstruction(
15534 m_out.load32(base, m_heaps.JSCell_structureID), cases, last.target(), Weight(0));
15535 return;
15536 }
15537
15538 m_out.switchInstruction(
15539 m_out.load32(base, m_heaps.JSCell_structureID), cases, exit, Weight(0));
15540 }
15541
// Lowers CompareEq where the right child is a known object and the left
// child is speculated to be either an object or "other" (null/undefined).
// The right cell is speculated to be a truthy (non-masquerading) object;
// if the left value is a cell it is compared by pointer equality, otherwise
// it is speculated to be "other" and the result is false.
void compareEqObjectOrOtherToObject(Edge leftChild, Edge rightChild)
{
    LValue rightCell = lowCell(rightChild);
    LValue leftValue = lowJSValue(leftChild, ManualOperandSpeculation);

    speculateTruthyObject(rightChild, rightCell, SpecObject);

    LBasicBlock leftCellCase = m_out.newBlock();
    LBasicBlock leftNotCellCase = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    m_out.branch(
        isCell(leftValue, provenType(leftChild)),
        unsure(leftCellCase), unsure(leftNotCellCase));

    // Left is a cell: speculate it is a truthy object; object equality is
    // pointer equality.
    LBasicBlock lastNext = m_out.appendTo(leftCellCase, leftNotCellCase);
    speculateTruthyObject(leftChild, leftValue, SpecObject | (~SpecCellCheck));
    ValueFromBlock cellResult = m_out.anchor(m_out.equal(rightCell, leftValue));
    m_out.jump(continuation);

    // Left is not a cell: speculate it is "other". A truthy object never
    // equals null/undefined, so the answer is false.
    m_out.appendTo(leftNotCellCase, continuation);
    FTL_TYPE_CHECK(
        jsValueValue(leftValue), leftChild, SpecOther | SpecCellCheck, isNotOther(leftValue));
    ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    setBoolean(m_out.phi(Int32, cellResult, notCellResult));
}
15571
// Speculates that `cell` is a truthy object, i.e. an object that does not
// masquerade as undefined. When the masquerades-as-undefined watchpoint is
// still valid no object can masquerade, so the object check alone suffices;
// otherwise we additionally speculate that the MasqueradesAsUndefined
// type-info flag is clear.
void speculateTruthyObject(Edge edge, LValue cell, SpeculatedType filter)
{
    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, filter, isNotObject(cell));
        return;
    }

    FTL_TYPE_CHECK(jsValueValue(cell), edge, filter, isNotObject(cell));
    speculate(
        BadType, jsValueValue(cell), edge.node(),
        m_out.testNonZero32(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
            m_out.constInt32(MasqueradesAsUndefined)));
}
15586
// Lowers a generic (untyped) comparison node. Fast path: when both operands
// are boxed Int32, apply `intFunctor` to the unboxed values inline. Slow
// path: call `helperFunction` and convert its non-zero/zero result into a
// boolean.
template<typename IntFunctor>
void genericJSValueCompare(const IntFunctor& intFunctor, S_JITOperation_GJJ helperFunction)
{
    JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
    LValue left = lowJSValue(m_node->child1(), ManualOperandSpeculation);
    LValue right = lowJSValue(m_node->child2(), ManualOperandSpeculation);
    speculate(m_node->child1());
    speculate(m_node->child2());

    LBasicBlock leftIsInt = m_out.newBlock();
    LBasicBlock fastPath = m_out.newBlock();
    LBasicBlock slowPath = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    m_out.branch(isNotInt32(left, provenType(m_node->child1())), rarely(slowPath), usually(leftIsInt));

    LBasicBlock lastNext = m_out.appendTo(leftIsInt, fastPath);
    m_out.branch(isNotInt32(right, provenType(m_node->child2())), rarely(slowPath), usually(fastPath));

    // Both operands are Int32: compare the unboxed values inline.
    m_out.appendTo(fastPath, slowPath);
    ValueFromBlock fastResult = m_out.anchor(intFunctor(unboxInt32(left), unboxInt32(right)));
    m_out.jump(continuation);

    // Otherwise call out to the runtime comparison helper.
    m_out.appendTo(slowPath, continuation);
    ValueFromBlock slowResult = m_out.anchor(m_out.notNull(vmCall(
        pointerType(), helperFunction, weakPointer(globalObject), left, right)));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    setBoolean(m_out.phi(Int32, fastResult, slowResult));
}
15618
// Emits an inline equality comparison of two JSStrings, producing a boolean
// (Int32) LValue. The inline path requires both strings to be resolved
// (non-rope) and 8-bit: unequal lengths are trivially unequal, zero length
// is trivially equal, and otherwise the bytes are compared in a loop that
// walks backwards from the end. Ropes and 16-bit strings fall back to
// operationCompareStringEq.
LValue stringsEqual(LValue leftJSString, LValue rightJSString, Edge leftJSStringEdge = Edge(), Edge rightJSStringEdge = Edge())
{
    JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
    LBasicBlock notTriviallyUnequalCase = m_out.newBlock();
    LBasicBlock notEmptyCase = m_out.newBlock();
    LBasicBlock leftReadyCase = m_out.newBlock();
    LBasicBlock rightReadyCase = m_out.newBlock();
    LBasicBlock left8BitCase = m_out.newBlock();
    LBasicBlock right8BitCase = m_out.newBlock();
    LBasicBlock loop = m_out.newBlock();
    LBasicBlock bytesEqual = m_out.newBlock();
    LBasicBlock trueCase = m_out.newBlock();
    LBasicBlock falseCase = m_out.newBlock();
    LBasicBlock slowCase = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    // Ropes have no resolved StringImpl; bail to the slow path.
    m_out.branch(isRopeString(leftJSString, leftJSStringEdge), rarely(slowCase), usually(leftReadyCase));

    LBasicBlock lastNext = m_out.appendTo(leftReadyCase, rightReadyCase);
    m_out.branch(isRopeString(rightJSString, rightJSStringEdge), rarely(slowCase), usually(rightReadyCase));

    // Different lengths can never be equal.
    m_out.appendTo(rightReadyCase, notTriviallyUnequalCase);
    LValue left = m_out.loadPtr(leftJSString, m_heaps.JSString_value);
    LValue right = m_out.loadPtr(rightJSString, m_heaps.JSString_value);
    LValue length = m_out.load32(left, m_heaps.StringImpl_length);
    m_out.branch(
        m_out.notEqual(length, m_out.load32(right, m_heaps.StringImpl_length)),
        unsure(falseCase), unsure(notTriviallyUnequalCase));

    // Two empty strings are equal.
    m_out.appendTo(notTriviallyUnequalCase, notEmptyCase);
    m_out.branch(m_out.isZero32(length), unsure(trueCase), unsure(notEmptyCase));

    // The byte loop only handles 8-bit strings; 16-bit goes to the slow path.
    m_out.appendTo(notEmptyCase, left8BitCase);
    m_out.branch(
        m_out.testIsZero32(
            m_out.load32(left, m_heaps.StringImpl_hashAndFlags),
            m_out.constInt32(StringImpl::flagIs8Bit())),
        unsure(slowCase), unsure(left8BitCase));

    m_out.appendTo(left8BitCase, right8BitCase);
    m_out.branch(
        m_out.testIsZero32(
            m_out.load32(right, m_heaps.StringImpl_hashAndFlags),
            m_out.constInt32(StringImpl::flagIs8Bit())),
        unsure(slowCase), unsure(right8BitCase));

    m_out.appendTo(right8BitCase, loop);

    LValue leftData = m_out.loadPtr(left, m_heaps.StringImpl_data);
    LValue rightData = m_out.loadPtr(right, m_heaps.StringImpl_data);

    // Loop index starts at `length` and is decremented before each compare,
    // so the bytes are visited from the last down to index 0.
    ValueFromBlock indexAtStart = m_out.anchor(length);

    m_out.jump(loop);

    m_out.appendTo(loop, bytesEqual);

    LValue indexAtLoopTop = m_out.phi(Int32, indexAtStart);
    LValue indexInLoop = m_out.sub(indexAtLoopTop, m_out.int32One);

    LValue leftByte = m_out.load8ZeroExt32(
        m_out.baseIndex(m_heaps.characters8, leftData, m_out.zeroExtPtr(indexInLoop)));
    LValue rightByte = m_out.load8ZeroExt32(
        m_out.baseIndex(m_heaps.characters8, rightData, m_out.zeroExtPtr(indexInLoop)));

    // First mismatching byte decides the comparison.
    m_out.branch(m_out.notEqual(leftByte, rightByte), unsure(falseCase), unsure(bytesEqual));

    m_out.appendTo(bytesEqual, trueCase);

    // Bytes matched: continue the loop until index 0 is reached.
    ValueFromBlock indexForNextIteration = m_out.anchor(indexInLoop);
    m_out.addIncomingToPhi(indexAtLoopTop, indexForNextIteration);
    m_out.branch(m_out.notZero32(indexInLoop), unsure(loop), unsure(trueCase));

    m_out.appendTo(trueCase, falseCase);

    ValueFromBlock trueResult = m_out.anchor(m_out.booleanTrue);
    m_out.jump(continuation);

    m_out.appendTo(falseCase, slowCase);

    ValueFromBlock falseResult = m_out.anchor(m_out.booleanFalse);
    m_out.jump(continuation);

    // Slow path: full runtime comparison (resolves ropes, handles 16-bit).
    m_out.appendTo(slowCase, continuation);

    LValue slowResultValue = vmCall(
        Int64, operationCompareStringEq, weakPointer(globalObject),
        leftJSString, rightJSString);
    ValueFromBlock slowResult = m_out.anchor(unboxBoolean(slowResultValue));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    return m_out.phi(Int32, trueResult, falseResult, slowResult);
}
15713
// Tells emitBinarySnippet whether the snippet generator needs an extra FP
// scratch register (passed as its final FPR argument) in addition to the two
// it always receives.
enum ScratchFPRUsage {
    DontNeedScratchFPR,
    NeedScratchFPR
};
// Lowers a binary arithmetic node by embedding a baseline-JIT-style snippet
// generator (BinaryArithOpGenerator) inside a B3 patchpoint. The generator's
// fast path is emitted inline; its slow-path jump list is linked on a late
// path that calls `slowPathFunction`. If the generator declines to emit a
// fast path, the runtime call is emitted unconditionally.
template<typename BinaryArithOpGenerator, ScratchFPRUsage scratchFPRUsage = DontNeedScratchFPR>
void emitBinarySnippet(J_JITOperation_GJJ slowPathFunction)
{
    Node* node = m_node;

    LValue left = lowJSValue(node->child1(), ManualOperandSpeculation);
    LValue right = lowJSValue(node->child2(), ManualOperandSpeculation);
    speculate(node->child1());
    speculate(node->child2());

    SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
    SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

    PatchpointValue* patchpoint = m_out.patchpoint(Int64);
    patchpoint->appendSomeRegister(left);
    patchpoint->appendSomeRegister(right);
    // Keep the JSValue tagging constants pinned in their dedicated registers
    // for the duration of the patchpoint.
    patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
    patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
    RefPtr<PatchpointExceptionHandle> exceptionHandle =
        preparePatchpointForExceptions(patchpoint);
    patchpoint->numGPScratchRegisters = 1;
    patchpoint->numFPScratchRegisters = 2;
    if (scratchFPRUsage == NeedScratchFPR)
        patchpoint->numFPScratchRegisters++;
    patchpoint->clobber(RegisterSet::macroScratchRegisters());
    patchpoint->resultConstraints = { ValueRep::SomeEarlyRegister };
    State* state = &m_ftlState;
    CodeOrigin semanticNodeOrigin = node->origin.semantic;
    patchpoint->setGenerator(
        [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            AllowMacroScratchRegisterUsage allowScratch(jit);

            Box<CCallHelpers::JumpList> exceptions =
                exceptionHandle->scheduleExitCreation(params)->jumps(jit);

            // params[0] is the result register; params[1]/params[2] hold the
            // left/right operands.
            auto generator = Box<BinaryArithOpGenerator>::create(
                leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()),
                params.fpScratch(0), params.fpScratch(1), params.gpScratch(0),
                scratchFPRUsage == NeedScratchFPR ? params.fpScratch(2) : InvalidFPRReg);

            generator->generateFastPath(jit);

            if (generator->didEmitFastPath()) {
                generator->endJumpList().link(&jit);
                CCallHelpers::Label done = jit.label();

                // Out-of-line slow path: call the runtime operation, then
                // rejoin the end of the fast path.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        generator->slowPathJumpList().link(&jit);
                        callOperation(
                            *state, params.unavailableRegisters(), jit, semanticNodeOrigin,
                            exceptions.get(), slowPathFunction, params[0].gpr(),
                            jit.codeBlock()->globalObjectFor(semanticNodeOrigin),
                            params[1].gpr(), params[2].gpr());
                        jit.jump().linkTo(done, &jit);
                    });
            } else {
                // No fast path was generated; just call the operation inline.
                callOperation(
                    *state, params.unavailableRegisters(), jit, semanticNodeOrigin,
                    exceptions.get(), slowPathFunction, params[0].gpr(), jit.codeBlock()->globalObjectFor(semanticNodeOrigin), params[1].gpr(),
                    params[2].gpr());
            }
        });

    setJSValue(patchpoint);
}
15787
// Lowers a binary bitwise node (untyped or BigInt use kinds, per the
// assertion below) via a baseline-style snippet generator inside a B3
// patchpoint. Unlike emitBinarySnippet, the fast path is linked
// unconditionally (there is no didEmitFastPath() check); the generator's
// slow-path jumps call `slowPathFunction` on a late path.
template<typename BinaryBitOpGenerator>
void emitBinaryBitOpSnippet(J_JITOperation_GJJ slowPathFunction)
{
    Node* node = m_node;

    DFG_ASSERT(m_graph, node, node->isBinaryUseKind(UntypedUse) || node->isBinaryUseKind(AnyBigIntUse) || node->isBinaryUseKind(BigInt32Use));
    LValue left = lowJSValue(node->child1(), ManualOperandSpeculation);
    LValue right = lowJSValue(node->child2(), ManualOperandSpeculation);
    speculate(node, node->child1());
    speculate(node, node->child2());

    SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
    SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

    PatchpointValue* patchpoint = m_out.patchpoint(Int64);
    patchpoint->appendSomeRegister(left);
    patchpoint->appendSomeRegister(right);
    // Keep the JSValue tagging constants pinned in their dedicated registers
    // for the duration of the patchpoint.
    patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
    patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
    RefPtr<PatchpointExceptionHandle> exceptionHandle =
        preparePatchpointForExceptions(patchpoint);
    patchpoint->numGPScratchRegisters = 1;
    patchpoint->clobber(RegisterSet::macroScratchRegisters());
    patchpoint->resultConstraints = { ValueRep::SomeEarlyRegister };
    State* state = &m_ftlState;
    CodeOrigin semanticNodeOrigin = node->origin.semantic;
    patchpoint->setGenerator(
        [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            AllowMacroScratchRegisterUsage allowScratch(jit);

            Box<CCallHelpers::JumpList> exceptions =
                exceptionHandle->scheduleExitCreation(params)->jumps(jit);

            // params[0] is the result register; params[1]/params[2] hold the
            // left/right operands.
            auto generator = Box<BinaryBitOpGenerator>::create(
                leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.gpScratch(0));

            generator->generateFastPath(jit);
            generator->endJumpList().link(&jit);
            CCallHelpers::Label done = jit.label();

            // Out-of-line slow path: call the runtime operation, then rejoin
            // the end of the fast path.
            params.addLatePath(
                [=] (CCallHelpers& jit) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);

                    generator->slowPathJumpList().link(&jit);
                    callOperation(
                        *state, params.unavailableRegisters(), jit, semanticNodeOrigin,
                        exceptions.get(), slowPathFunction, params[0].gpr(),
                        jit.codeBlock()->globalObjectFor(semanticNodeOrigin),
                        params[1].gpr(), params[2].gpr());
                    jit.jump().linkTo(done, &jit);
                });
        });

    setJSValue(patchpoint);
}
15845
// Lowers a right-shift node (signed or unsigned, selected by `shiftType`)
// via JITRightShiftGenerator inside a B3 patchpoint. The slow path picks
// operationValueBitRShift or operationValueBitURShift based on the shift
// type.
void emitRightShiftSnippet(JITRightShiftGenerator::ShiftType shiftType)
{
    Node* node = m_node;

    // FIXME: Make this do exceptions.
    // https://bugs.webkit.org/show_bug.cgi?id=151686

    LValue left = lowJSValue(node->child1());
    LValue right = lowJSValue(node->child2());

    SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
    SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

    PatchpointValue* patchpoint = m_out.patchpoint(Int64);
    patchpoint->appendSomeRegister(left);
    patchpoint->appendSomeRegister(right);
    // Keep the JSValue tagging constants pinned in their dedicated registers
    // for the duration of the patchpoint.
    patchpoint->append(m_notCellMask, ValueRep::lateReg(GPRInfo::notCellMaskRegister));
    patchpoint->append(m_numberTag, ValueRep::lateReg(GPRInfo::numberTagRegister));
    RefPtr<PatchpointExceptionHandle> exceptionHandle =
        preparePatchpointForExceptions(patchpoint);
    patchpoint->numGPScratchRegisters = 1;
    patchpoint->numFPScratchRegisters = 1;
    patchpoint->clobber(RegisterSet::macroScratchRegisters());
    patchpoint->resultConstraints = { ValueRep::SomeEarlyRegister };
    State* state = &m_ftlState;
    CodeOrigin semanticNodeOrigin = node->origin.semantic;
    patchpoint->setGenerator(
        [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            AllowMacroScratchRegisterUsage allowScratch(jit);

            Box<CCallHelpers::JumpList> exceptions =
                exceptionHandle->scheduleExitCreation(params)->jumps(jit);

            // params[0] is the result register; params[1]/params[2] hold the
            // left/right operands.
            auto generator = Box<JITRightShiftGenerator>::create(
                leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()),
                params.fpScratch(0), params.gpScratch(0), InvalidFPRReg, shiftType);

            generator->generateFastPath(jit);
            generator->endJumpList().link(&jit);
            CCallHelpers::Label done = jit.label();

            // Out-of-line slow path: call the runtime operation matching the
            // shift type, then rejoin the end of the fast path.
            params.addLatePath(
                [=] (CCallHelpers& jit) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);

                    generator->slowPathJumpList().link(&jit);

                    J_JITOperation_GJJ slowPathFunction =
                        shiftType == JITRightShiftGenerator::SignedShift
                        ? operationValueBitRShift : operationValueBitURShift;

                    callOperation(
                        *state, params.unavailableRegisters(), jit, semanticNodeOrigin,
                        exceptions.get(), slowPathFunction, params[0].gpr(),
                        jit.codeBlock()->globalObjectFor(semanticNodeOrigin),
                        params[1].gpr(), params[2].gpr());
                    jit.jump().linkTo(done, &jit);
                });
        });

    setJSValue(patchpoint);
}
15909
// Emits an inline heap-cell allocation from `allocator` (an intptr LValue
// that is either a compile-time LocalAllocator* constant or a runtime
// value), branching to `slowPath` when inline allocation fails or no
// allocator is available. Returns the allocated cell pointer.
LValue allocateHeapCell(LValue allocator, LBasicBlock slowPath)
{
    JITAllocator actualAllocator;
    if (allocator->hasIntPtr())
        actualAllocator = JITAllocator::constant(Allocator(bitwise_cast<LocalAllocator*>(allocator->asIntPtr())));
    else
        actualAllocator = JITAllocator::variable();

    if (actualAllocator.isConstant()) {
        if (!actualAllocator.allocator()) {
            // Known-null allocator: always take the slow path. The extra
            // block keeps the emitted CFG well formed even though the
            // returned zero is past an unconditional jump.
            LBasicBlock haveAllocator = m_out.newBlock();
            LBasicBlock lastNext = m_out.insertNewBlocksBefore(haveAllocator);
            m_out.jump(slowPath);
            m_out.appendTo(haveAllocator, lastNext);
            return m_out.intPtrZero;
        }
    } else {
        // This means that either we know that the allocator is null or we don't know what the
        // allocator is. In either case, we need the null check.
        LBasicBlock haveAllocator = m_out.newBlock();
        LBasicBlock lastNext = m_out.insertNewBlocksBefore(haveAllocator);
        m_out.branch(
            m_out.notEqual(allocator, m_out.intPtrZero),
            usually(haveAllocator), rarely(slowPath));
        m_out.appendTo(haveAllocator, lastNext);
    }

    LBasicBlock continuation = m_out.newBlock();

    LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);

    PatchpointValue* patchpoint = m_out.patchpoint(pointerType());
    if (isARM64()) {
        // emitAllocateWithNonNullAllocator uses the scratch registers on ARM.
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
    }
    // The patchpoint terminates its block; successor 0 is the successful
    // continuation, successor 1 is the slow path (appended below).
    patchpoint->effects.terminal = true;
    if (actualAllocator.isConstant())
        patchpoint->numGPScratchRegisters++; // gpScratch(1) will hold the allocator.
    else
        patchpoint->appendSomeRegisterWithClobber(allocator);
    patchpoint->numGPScratchRegisters++; // gpScratch(0) is emitAllocate's temp.
    patchpoint->resultConstraints = { ValueRep::SomeEarlyRegister };

    m_out.appendSuccessor(usually(continuation));
    m_out.appendSuccessor(rarely(slowPath));

    patchpoint->setGenerator(
        [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            AllowMacroScratchRegisterUsageIf allowScratchIf(jit, isARM64());
            CCallHelpers::JumpList jumpToSlowPath;

            GPRReg allocatorGPR;
            if (actualAllocator.isConstant())
                allocatorGPR = params.gpScratch(1);
            else
                allocatorGPR = params[1].gpr();

            // We use a patchpoint to emit the allocation path because whenever we mess with
            // allocation paths, we already reason about them at the machine code level. We know
            // exactly what instruction sequence we want. We're confident that no compiler
            // optimization could make this code better. So, it's best to have the code in
            // AssemblyHelpers::emitAllocate(). That way, the same optimized path is shared by
            // all of the compiler tiers.
            jit.emitAllocateWithNonNullAllocator(
                params[0].gpr(), actualAllocator, allocatorGPR, params.gpScratch(0),
                jumpToSlowPath);

            CCallHelpers::Jump jumpToSuccess;
            if (!params.fallsThroughToSuccessor(0))
                jumpToSuccess = jit.jump();

            Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();

            // Wire the allocation-failure jumps to the slow-path successor,
            // and the explicit success jump (if one was needed) to the
            // continuation.
            params.addLatePath(
                [=] (CCallHelpers& jit) {
                    jumpToSlowPath.linkTo(*labels[1], &jit);
                    if (jumpToSuccess.isSet())
                        jumpToSuccess.linkTo(*labels[0], &jit);
                });
        });

    m_out.appendTo(continuation, lastNext);
    return patchpoint;
}
15995
15996 void storeStructure(LValue object, Structure* structure)
15997 {
15998 m_out.store32(m_out.constInt32(structure->id()), object, m_heaps.JSCell_structureID);
15999 m_out.store32(
16000 m_out.constInt32(structure->objectInitializationBlob()),
16001 object, m_heaps.JSCell_usefulBytes);
16002 }
16003
16004 void storeStructure(LValue object, LValue structure)
16005 {
16006 if (structure->hasIntPtr()) {
16007 storeStructure(object, bitwise_cast<Structure*>(structure->asIntPtr()));
16008 return;
16009 }
16010
16011 LValue id = m_out.load32(structure, m_heaps.Structure_structureID);
16012 m_out.store32(id, object, m_heaps.JSCell_structureID);
16013
16014 LValue blob = m_out.load32(structure, m_heaps.Structure_indexingModeIncludingHistory);
16015 m_out.store32(blob, object, m_heaps.JSCell_usefulBytes);
16016 }
16017
16018 template <typename StructureType>
16019 LValue allocateCell(LValue allocator, StructureType structure, LBasicBlock slowPath)
16020 {
16021 LValue result = allocateHeapCell(allocator, slowPath);
16022 storeStructure(result, structure);
16023 return result;
16024 }
16025
16026 LValue allocateObject(LValue allocator, RegisteredStructure structure, LValue butterfly, LBasicBlock slowPath)
16027 {
16028 return allocateObject(allocator, weakStructure(structure), butterfly, slowPath);
16029 }
16030
16031 LValue allocateObject(LValue allocator, LValue structure, LValue butterfly, LBasicBlock slowPath)
16032 {
16033 LValue result = allocateCell(allocator, structure, slowPath);
16034 if (structure->hasIntPtr()) {
16035 splatWords(
16036 result,
16037 m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
16038 m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8 + bitwise_cast<Structure*>(structure->asIntPtr())->inlineCapacity()),
16039 m_out.int64Zero,
16040 m_heaps.properties.atAnyNumber());
16041 } else {
16042 LValue end = m_out.add(
16043 m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
16044 m_out.load8ZeroExt32(structure, m_heaps.Structure_inlineCapacity));
16045 splatWords(
16046 result,
16047 m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
16048 end,
16049 m_out.int64Zero,
16050 m_heaps.properties.atAnyNumber());
16051 }
16052
16053 m_out.storePtr(butterfly, result, m_heaps.JSObject_butterfly);
16054 return result;
16055 }
16056
16057 template<typename ClassType, typename StructureType>
16058 LValue allocateObject(
16059 size_t size, StructureType structure, LValue butterfly, LBasicBlock slowPath)
16060 {
16061 Allocator allocator = allocatorForNonVirtualConcurrently<ClassType>(vm(), size, AllocatorForMode::AllocatorIfExists);
16062 return allocateObject(
16063 m_out.constIntPtr(allocator.localAllocator()), structure, butterfly, slowPath);
16064 }
16065
16066 template<typename ClassType, typename StructureType>
16067 LValue allocateObject(StructureType structure, LValue butterfly, LBasicBlock slowPath)
16068 {
16069 return allocateObject<ClassType>(
16070 ClassType::allocationSize(0), structure, butterfly, slowPath);
16071 }
16072
// Returns (as an intptr LValue) the LocalAllocator to use for allocating
// `size` bytes out of `subspace`, branching to `slowPath` when no inline
// allocator applies (size above MarkedSpace::largeCutoff, or no allocator
// installed for that size class). Constant-folds when both subspace and size
// are compile-time constants.
LValue allocatorForSize(LValue subspace, LValue size, LBasicBlock slowPath)
{
    static_assert(!(MarkedSpace::sizeStep & (MarkedSpace::sizeStep - 1)), "MarkedSpace::sizeStep must be a power of two.");

    // Try to do some constant-folding here.
    if (subspace->hasIntPtr() && size->hasIntPtr()) {
        CompleteSubspace* actualSubspace = bitwise_cast<CompleteSubspace*>(subspace->asIntPtr());
        size_t actualSize = size->asIntPtr();

        Allocator actualAllocator = actualSubspace->allocatorForNonVirtual(actualSize, AllocatorForMode::AllocatorIfExists);
        if (!actualAllocator) {
            // No allocator for this constant size: unconditionally take the
            // slow path. The extra block keeps the emitted CFG well formed
            // even though the returned zero is past an unconditional jump.
            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
            m_out.jump(slowPath);
            m_out.appendTo(continuation, lastNext);
            return m_out.intPtrZero;
        }

        return m_out.constIntPtr(actualAllocator.localAllocator());
    }

    unsigned stepShift = getLSBSet(MarkedSpace::sizeStep);

    LBasicBlock continuation = m_out.newBlock();

    LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);

    // Size-class index: round the size up to the next sizeStep boundary and
    // divide by sizeStep (a shift, since sizeStep is a power of two per the
    // static_assert above).
    LValue sizeClassIndex = m_out.lShr(
        m_out.add(size, m_out.constIntPtr(MarkedSpace::sizeStep - 1)),
        m_out.constInt32(stepShift));

    // Sizes beyond largeCutoff have no inline allocator.
    m_out.branch(
        m_out.above(sizeClassIndex, m_out.constIntPtr(MarkedSpace::largeCutoff >> stepShift)),
        rarely(slowPath), usually(continuation));

    m_out.appendTo(continuation, lastNext);

    // Index into the subspace's per-size-step allocator table.
    return m_out.loadPtr(
        m_out.baseIndex(
            m_heaps.CompleteSubspace_allocatorForSizeStep,
            subspace, sizeClassIndex));
}
16115
16116 LValue allocatorForSize(CompleteSubspace& subspace, LValue size, LBasicBlock slowPath)
16117 {
16118 return allocatorForSize(m_out.constIntPtr(&subspace), size, slowPath);
16119 }
16120
16121 template<typename ClassType>
16122 LValue allocateVariableSizedObject(
16123 LValue size, RegisteredStructure structure, LValue butterfly, LBasicBlock slowPath)
16124 {
16125 CompleteSubspace* subspace = subspaceForConcurrently<ClassType>(vm());
16126 RELEASE_ASSERT_WITH_MESSAGE(subspace, "CompleteSubspace is always allocated");
16127 LValue allocator = allocatorForSize(*subspace, size, slowPath);
16128 return allocateObject(allocator, structure, butterfly, slowPath);
16129 }
16130
16131 template<typename ClassType>
16132 LValue allocateVariableSizedCell(
16133 LValue size, Structure* structure, LBasicBlock slowPath)
16134 {
16135 CompleteSubspace* subspace = subspaceForConcurrently<ClassType>(vm());
16136 RELEASE_ASSERT_WITH_MESSAGE(subspace, "CompleteSubspace is always allocated");
16137 LValue allocator = allocatorForSize(*subspace, size, slowPath);
16138 return allocateCell(allocator, structure, slowPath);
16139 }
16140
// Allocates a JSFinalObject with the given structure: an inline fast path
// (with a null butterfly), and a lazily generated slow path that calls
// operationNewObject.
LValue allocateObject(RegisteredStructure structure)
{
    size_t allocationSize = JSFinalObject::allocationSize(structure.get()->inlineCapacity());
    Allocator allocator = allocatorForNonVirtualConcurrently<JSFinalObject>(vm(), allocationSize, AllocatorForMode::AllocatorIfExists);

    // FIXME: If the allocator is null, we could simply emit a normal C call to the allocator
    // instead of putting it on the slow path.
    // https://bugs.webkit.org/show_bug.cgi?id=161062

    LBasicBlock slowPath = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

    // Fast path: inline allocation with a null (intPtrZero) butterfly.
    ValueFromBlock fastResult = m_out.anchor(allocateObject(
        m_out.constIntPtr(allocator.localAllocator()), structure, m_out.intPtrZero, slowPath));

    m_out.jump(continuation);

    m_out.appendTo(slowPath, continuation);

    VM& vm = this->vm();
    // Slow path: call operationNewObject through a lazily generated call
    // stub.
    LValue slowResultValue = lazySlowPath(
        [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
            return createLazyCallGenerator(vm,
                operationNewObject, locations[0].directGPR(), &vm,
                CCallHelpers::TrustedImmPtr(structure.get()));
        });
    ValueFromBlock slowResult = m_out.anchor(slowResultValue);
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    return m_out.phi(pointerType(), fastResult, slowResult);
}
16175
16176 struct ArrayValues {
16177 ArrayValues()
16178 : array(nullptr)
16179 , butterfly(nullptr)
16180 {
16181 }
16182
16183 ArrayValues(LValue array, LValue butterfly)
16184 : array(array)
16185 , butterfly(butterfly)
16186 {
16187 }
16188
16189 LValue array;
16190 LValue butterfly;
16191 };
16192
    // Allocates a JSArray with the given public length, vector length,
    // structure, and indexing type. The fast path inline-allocates the
    // butterfly and the array cell. Arrays at or above
    // MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH (when
    // shouldLargeArraySizeCreateArrayStorage) are redirected to ArrayStorage,
    // and both that case and allocation failure fall back to
    // operationNewArrayWithSize / operationNewArrayWithSizeAndHint.
    ArrayValues allocateJSArray(LValue publicLength, LValue vectorLength, LValue structure, LValue indexingType, bool shouldInitializeElements = true, bool shouldLargeArraySizeCreateArrayStorage = true)
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        LBasicBlock fastCase = m_out.newBlock();
        LBasicBlock largeCase = m_out.newBlock();
        LBasicBlock failCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(fastCase);

        // When the structure and lengths are compile-time constants, round the
        // vector length up to the optimal contiguous vector length now.
        std::optional<unsigned> staticVectorLength;
        std::optional<unsigned> staticVectorLengthFromPublicLength;
        if (structure->hasIntPtr()) {
            if (publicLength->hasInt32()) {
                unsigned publicLengthConst = static_cast<unsigned>(publicLength->asInt32());
                if (publicLengthConst <= MAX_STORAGE_VECTOR_LENGTH) {
                    publicLengthConst = Butterfly::optimalContiguousVectorLength(
                        bitwise_cast<Structure*>(structure->asIntPtr())->outOfLineCapacity(), publicLengthConst);
                    staticVectorLengthFromPublicLength = publicLengthConst;
                }

            }
            if (vectorLength->hasInt32()) {
                unsigned vectorLengthConst = static_cast<unsigned>(vectorLength->asInt32());
                if (vectorLengthConst <= MAX_STORAGE_VECTOR_LENGTH) {
                    vectorLengthConst = Butterfly::optimalContiguousVectorLength(
                        bitwise_cast<Structure*>(structure->asIntPtr())->outOfLineCapacity(), vectorLengthConst);
                    vectorLength = m_out.constInt32(vectorLengthConst);
                    staticVectorLength = vectorLengthConst;
                }
            }
        } else {
            // We don't compute the optimal vector length for new Array(blah) where blah is not
            // statically known, since the compute effort of doing it here is probably not worth it.
        }

        // The slow path can be entered before any butterfly exists (the large
        // case), so anchor a null value for that edge of the phi below.
        ValueFromBlock noButterfly = m_out.anchor(m_out.intPtrZero);

        LValue predicate;
        if (shouldLargeArraySizeCreateArrayStorage)
            predicate = m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
        else
            predicate = m_out.booleanFalse;

        m_out.branch(predicate, rarely(largeCase), usually(fastCase));

        m_out.appendTo(fastCase, largeCase);

        // Butterfly payload: vectorLength 8-byte (JSValue-sized) slots.
        LValue payloadSize =
            m_out.shl(m_out.zeroExt(vectorLength, pointerType()), m_out.constIntPtr(3));

        LValue butterflySize = m_out.add(
            payloadSize, m_out.constIntPtr(sizeof(IndexingHeader)));

        LValue allocator = allocatorForSize(vm().jsValueGigacageAuxiliarySpace, butterflySize, failCase);
        LValue startOfStorage = allocateHeapCell(allocator, failCase);

        // The butterfly pointer points just past the indexing header.
        LValue butterfly = m_out.add(startOfStorage, m_out.constIntPtr(sizeof(IndexingHeader)));

        m_out.store32(publicLength, butterfly, m_heaps.Butterfly_publicLength);
        m_out.store32(vectorLength, butterfly, m_heaps.Butterfly_vectorLength);

        // Initialize the element slots the caller does not promise to fill:
        // all of them when shouldInitializeElements, otherwise only the slack
        // between publicLength and vectorLength.
        initializeArrayElements(
            indexingType,
            shouldInitializeElements ? m_out.int32Zero : publicLength, vectorLength,
            butterfly);

        ValueFromBlock haveButterfly = m_out.anchor(butterfly);

        LValue object = allocateObject<JSArray>(structure, butterfly, failCase);

        ValueFromBlock fastResult = m_out.anchor(object);
        ValueFromBlock fastButterfly = m_out.anchor(butterfly);
        m_out.jump(continuation);

        // Large arrays are created on the slow path with an ArrayStorage structure.
        m_out.appendTo(largeCase, failCase);
        ValueFromBlock largeStructure = m_out.anchor(
            weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage))));
        m_out.jump(slowCase);

        // Allocation failure retries on the slow path with the original structure.
        m_out.appendTo(failCase, slowCase);
        ValueFromBlock failStructure = m_out.anchor(structure);
        m_out.jump(slowCase);

        m_out.appendTo(slowCase, continuation);
        LValue structureValue = m_out.phi(pointerType(), largeStructure, failStructure);
        LValue butterflyValue = m_out.phi(pointerType(), noButterfly, haveButterfly);

        VM& vm = this->vm();
        LValue slowResultValue = nullptr;
        // If the vector length carries no information beyond the public length,
        // the plain operationNewArrayWithSize suffices; otherwise pass the
        // vector length along as an allocation hint.
        if (vectorLength == publicLength
            || (staticVectorLengthFromPublicLength && staticVectorLength && staticVectorLength.value() == staticVectorLengthFromPublicLength.value())) {
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationNewArrayWithSize, locations[0].directGPR(), globalObject,
                        locations[1].directGPR(), locations[2].directGPR(), locations[3].directGPR());
                },
                structureValue, publicLength, butterflyValue);
        } else {
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationNewArrayWithSizeAndHint, locations[0].directGPR(), globalObject,
                        locations[1].directGPR(), locations[2].directGPR(), locations[3].directGPR(), locations[4].directGPR());
                },
                structureValue, publicLength, vectorLength, butterflyValue);
        }

        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        ValueFromBlock slowButterfly = m_out.anchor(
            m_out.loadPtr(slowResultValue, m_heaps.JSObject_butterfly));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return ArrayValues(
            m_out.phi(pointerType(), fastResult, slowResult),
            m_out.phi(pointerType(), fastButterfly, slowButterfly));
    }
16313
16314 ArrayValues allocateUninitializedContiguousJSArrayInternal(LValue publicLength, LValue vectorLength, RegisteredStructure structure)
16315 {
16316 bool shouldInitializeElements = false;
16317 bool shouldLargeArraySizeCreateArrayStorage = false;
16318 return allocateJSArray(
16319 publicLength, vectorLength, weakStructure(structure), m_out.constInt32(structure->indexingType()), shouldInitializeElements,
16320 shouldLargeArraySizeCreateArrayStorage);
16321 }
16322
    // Convenience overload: the vector length equals the public length.
    ArrayValues allocateUninitializedContiguousJSArray(LValue publicLength, RegisteredStructure structure)
    {
        return allocateUninitializedContiguousJSArrayInternal(publicLength, publicLength, structure);
    }
16327
16328 ArrayValues allocateUninitializedContiguousJSArray(unsigned publicLength, unsigned vectorLength, RegisteredStructure structure)
16329 {
16330 ASSERT(vectorLength >= publicLength);
16331 return allocateUninitializedContiguousJSArrayInternal(m_out.constInt32(publicLength), m_out.constInt32(vectorLength), structure);
16332 }
16333
    // Returns a pointer to the next free ShadowChicken log packet, and advances
    // the log cursor past it. If the cursor has reached the end of the log, the
    // log is drained first via operationProcessShadowChickenLog.
    LValue ensureShadowChickenPacket()
    {
        ShadowChicken* shadowChicken = vm().shadowChicken();
        RELEASE_ASSERT(shadowChicken);
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        TypedPointer addressOfLogCursor = m_out.absolute(shadowChicken->addressOfLogCursor());
        LValue logCursor = m_out.loadPtr(addressOfLogCursor);

        ValueFromBlock fastResult = m_out.anchor(logCursor);

        // Fast path: there is still room in the log.
        m_out.branch(
            m_out.below(logCursor, m_out.constIntPtr(shadowChicken->logEnd())),
            usually(continuation), rarely(slowCase));

        LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);

        // Slow path: drain the log, then re-read the cursor.
        vmCall(Void, operationProcessShadowChickenLog, m_vmValue);

        ValueFromBlock slowResult = m_out.anchor(m_out.loadPtr(addressOfLogCursor));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(pointerType(), fastResult, slowResult);

        // Bump the cursor past the packet we are handing out.
        m_out.storePtr(
            m_out.add(result, m_out.constIntPtr(sizeof(ShadowChicken::Packet))),
            addressOfLogCursor);

        return result;
    }
16366
    // Lowers `edge` to a boolean (Int32 0/1) following JS ToBoolean semantics,
    // specialized on the edge's use kind.
    LValue boolify(Edge edge)
    {
        switch (edge.useKind()) {
        case BooleanUse:
        case KnownBooleanUse:
            return lowBoolean(edge);
        case Int32Use:
            // Any non-zero int32 is truthy.
            return m_out.notZero32(lowInt32(edge));
        case DoubleRepUse:
            // False for +/-0 and for NaN (the unordered compare yields false).
            return m_out.doubleNotEqualAndOrdered(lowDouble(edge), m_out.doubleZero);
        case ObjectOrOtherUse:
            return m_out.logicalNot(
                equalNullOrUndefined(
                    edge, CellCaseSpeculatesObject, SpeculateNullOrUndefined,
                    ManualOperandSpeculation));
        case StringUse:
            // Pointer-compare against the shared empty-string cell; the empty
            // string is the only falsy string.
            return m_out.notEqual(lowString(edge), weakPointer(jsEmptyString(m_graph.m_vm)));
        case StringOrOtherUse: {
            LValue value = lowJSValue(edge, ManualOperandSpeculation);

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));

            // Cell case: speculate string, then compare against the empty string.
            LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
            FTL_TYPE_CHECK(jsValueValue(value), edge, (~SpecCellCheck) | SpecString, isNotString(value));
            ValueFromBlock stringResult = m_out.anchor(m_out.notEqual(value, weakPointer(jsEmptyString(m_graph.m_vm))));
            m_out.jump(continuation);

            // Non-cell case: speculate null/undefined, which are always falsy.
            m_out.appendTo(notCellCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));
            ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int32, stringResult, notCellResult);
        }
        case UntypedUse: {
            LValue value = lowJSValue(edge);

            // Implements the following control flow structure:
            // if (value is cell) {
            //     if (value is string or value is HeapBigInt)
            //         result = !!value->length
            //     else {
            //         do evil things for masquerades-as-undefined
            //         result = true
            //     }
            // } else if (value is int32) {
            //     result = !!unboxInt32(value)
            // } else if (value is number) {
            //     result = !!unboxDouble(value)
            // } else if (value is BigInt32) {
            //     result = (value != BigInt32Tag)
            // } else {
            //     result = value == jsTrue
            // }

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notStringCase = m_out.newBlock();
            LBasicBlock stringCase = m_out.newBlock();
            LBasicBlock heapBigIntCase = m_out.newBlock();
            LBasicBlock notStringNorHeapBigIntCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock int32Case = m_out.newBlock();
            LBasicBlock notInt32Case = m_out.newBlock();
            LBasicBlock doubleCase = m_out.newBlock();
            LBasicBlock notDoubleCase = m_out.newBlock();
#if USE(BIGINT32)
            LBasicBlock bigInt32Case = m_out.newBlock();
            LBasicBlock notBigInt32Case = m_out.newBlock();
#endif
            LBasicBlock continuation = m_out.newBlock();

            Vector<ValueFromBlock> results;

            m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(cellCase, notStringCase);
            m_out.branch(
                isString(value, provenType(edge) & SpecCell),
                unsure(stringCase), unsure(notStringCase));

            m_out.appendTo(notStringCase, stringCase);
            m_out.branch(
                isHeapBigInt(value, provenType(edge) & (SpecCell - SpecString)),
                unsure(heapBigIntCase), unsure(notStringNorHeapBigIntCase));

            // String: truthy unless it is the shared empty-string cell.
            m_out.appendTo(stringCase, heapBigIntCase);
            results.append(m_out.anchor(m_out.notEqual(value, weakPointer(jsEmptyString(m_graph.m_vm)))));
            m_out.jump(continuation);

            // HeapBigInt: truthy iff it has a non-zero digit count.
            m_out.appendTo(heapBigIntCase, notStringNorHeapBigIntCase);
            LValue nonZeroBigInt = m_out.notZero32(
                m_out.load32NonNegative(value, m_heaps.JSBigInt_length));
            results.append(m_out.anchor(nonZeroBigInt));
            m_out.jump(continuation);

            // Other cells (objects): truthy, except MasqueradesAsUndefined
            // objects in their own global object, which are falsy.
            m_out.appendTo(notStringNorHeapBigIntCase, notCellCase);
            LValue isTruthyObject;
            if (masqueradesAsUndefinedWatchpointIsStillValid())
                isTruthyObject = m_out.booleanTrue;
            else {
                LBasicBlock masqueradesCase = m_out.newBlock();

                results.append(m_out.anchor(m_out.booleanTrue));

                m_out.branch(
                    m_out.testIsZero32(
                        m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
                        m_out.constInt32(MasqueradesAsUndefined)),
                    usually(continuation), rarely(masqueradesCase));

                m_out.appendTo(masqueradesCase);

                isTruthyObject = m_out.notEqual(
                    weakPointer(m_graph.globalObjectFor(m_origin.semantic)),
                    m_out.loadPtr(loadStructure(value), m_heaps.Structure_globalObject));
            }
            results.append(m_out.anchor(isTruthyObject));
            m_out.jump(continuation);

            m_out.appendTo(notCellCase, int32Case);
            m_out.branch(
                isInt32(value, provenType(edge) & ~SpecCell),
                unsure(int32Case), unsure(notInt32Case));

            m_out.appendTo(int32Case, notInt32Case);
            results.append(m_out.anchor(m_out.notZero32(unboxInt32(value))));
            m_out.jump(continuation);

            m_out.appendTo(notInt32Case, doubleCase);
            m_out.branch(
                isNumber(value, provenType(edge) & ~SpecCell),
                unsure(doubleCase), unsure(notDoubleCase));

            m_out.appendTo(doubleCase, notDoubleCase);
            LValue doubleIsTruthy = m_out.doubleNotEqualAndOrdered(
                unboxDouble(value), m_out.constDouble(0));
            results.append(m_out.anchor(doubleIsTruthy));
            m_out.jump(continuation);

#if USE(BIGINT32)
            m_out.appendTo(notDoubleCase, bigInt32Case);
            m_out.branch(
                isBigInt32(value, provenType(edge) & ~SpecCell),
                unsure(bigInt32Case), unsure(notBigInt32Case));

            // BigInt32: the bare tag encodes zero, which is the only falsy value.
            m_out.appendTo(bigInt32Case, notBigInt32Case);
            LValue bigInt32NotZero = m_out.notEqual(value, m_out.constInt64(JSValue::BigInt32Tag));
            results.append(m_out.anchor(bigInt32NotZero));
            m_out.jump(continuation);

            m_out.appendTo(notBigInt32Case, continuation);
#else
            m_out.appendTo(notDoubleCase, continuation);
#endif
            // Remaining values (booleans, null, undefined): only `true` is truthy.
            LValue miscIsTruthy = m_out.equal(
                value, m_out.constInt64(JSValue::encode(jsBoolean(true))));
            results.append(m_out.anchor(miscIsTruthy));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int32, results);
        }
        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return nullptr;
        }
    }
16539
    // How equalNullOrUndefined() treats cell (heap object) inputs.
    enum StringOrObjectMode {
        AllCellsAreFalse,
        CellCaseSpeculatesObject
    };
    // What equalNullOrUndefined() compares non-cell inputs against.
    // SpeculateNullOrUndefined emits a type check instead of a comparison.
    enum EqualNullOrUndefinedMode {
        EqualNull,
        EqualUndefined,
        EqualNullOrUndefined,
        SpeculateNullOrUndefined
    };
    // Produces a boolean answering "is edge equal to null/undefined" according
    // to primitiveMode. For cells the answer is normally false, except for
    // MasqueradesAsUndefined objects, which compare true within their own
    // global object; with CellCaseSpeculatesObject the cell is additionally
    // speculated to be an object.
    LValue equalNullOrUndefined(
        Edge edge, StringOrObjectMode cellMode, EqualNullOrUndefinedMode primitiveMode,
        OperandSpeculationMode operandMode = AutomaticOperandSpeculation)
    {
        bool validWatchpoint = masqueradesAsUndefinedWatchpointIsStillValid();

        LValue value = lowJSValue(edge, operandMode);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock primitiveCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isNotCell(value, provenType(edge)), unsure(primitiveCase), unsure(cellCase));

        LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase);

        Vector<ValueFromBlock, 3> results;

        switch (cellMode) {
        case AllCellsAreFalse:
            break;
        case CellCaseSpeculatesObject:
            FTL_TYPE_CHECK(
                jsValueValue(value), edge, (~SpecCellCheck) | SpecObject, isNotObject(value));
            break;
        }

        if (validWatchpoint) {
            // No object can masquerade as undefined, so every cell compares false.
            results.append(m_out.anchor(m_out.booleanFalse));
            m_out.jump(continuation);
        } else {
            LBasicBlock masqueradesCase =
                m_out.newBlock();

            results.append(m_out.anchor(m_out.booleanFalse));

            m_out.branch(
                m_out.testNonZero32(
                    m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
                    m_out.constInt32(MasqueradesAsUndefined)),
                rarely(masqueradesCase), usually(continuation));

            m_out.appendTo(masqueradesCase, primitiveCase);

            LValue structure = loadStructure(value);

            // A masquerading cell compares true only against code from its own
            // global object.
            results.append(m_out.anchor(
                m_out.equal(
                    weakPointer(m_graph.globalObjectFor(m_origin.semantic)),
                    m_out.loadPtr(structure, m_heaps.Structure_globalObject))));
            m_out.jump(continuation);
        }

        m_out.appendTo(primitiveCase, continuation);

        LValue primitiveResult;
        switch (primitiveMode) {
        case EqualNull:
            primitiveResult = m_out.equal(value, m_out.constInt64(JSValue::ValueNull));
            break;
        case EqualUndefined:
            primitiveResult = m_out.equal(value, m_out.constInt64(JSValue::ValueUndefined));
            break;
        case EqualNullOrUndefined:
            primitiveResult = isOther(value, provenType(edge));
            break;
        case SpeculateNullOrUndefined:
            // The type check proves null/undefined, so the answer is simply true.
            FTL_TYPE_CHECK(
                jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));
            primitiveResult = m_out.booleanTrue;
            break;
        }
        results.append(m_out.anchor(primitiveResult));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        return m_out.phi(Int32, results);
    }
16629
    // Emits the bounds handling for a contiguous put-by-val. In-bounds stores
    // fall straight through. An index past the public length but within the
    // vector length grows the public length to index + 1 before the store. An
    // index past the vector length either speculates (OSR exit) when the array
    // mode forbids out-of-bounds writes, or calls slowPathFunction and jumps to
    // `continuation`. The caller emits the actual store at the insertion point
    // this function leaves behind.
    template<typename FunctionType>
    void contiguousPutByValOutOfBounds(
        FunctionType slowPathFunction, LValue base, LValue storage, LValue index, LValue value,
        LBasicBlock continuation)
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        if (!m_node->arrayMode().isInBounds()) {
            LBasicBlock notInBoundsCase =
                m_out.newBlock();
            LBasicBlock performStore =
                m_out.newBlock();

            LValue isNotInBounds = m_out.aboveOrEqual(
                index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength));
            m_out.branch(isNotInBounds, unsure(notInBoundsCase), unsure(performStore));

            LBasicBlock lastNext = m_out.appendTo(notInBoundsCase, performStore);

            LValue isOutOfBounds = m_out.aboveOrEqual(
                index, m_out.load32NonNegative(storage, m_heaps.Butterfly_vectorLength));

            if (!m_node->arrayMode().isOutOfBounds())
                speculate(OutOfBounds, noValue(), nullptr, isOutOfBounds);
            else {
                LBasicBlock outOfBoundsCase =
                    m_out.newBlock();
                LBasicBlock holeCase =
                    m_out.newBlock();

                m_out.branch(isOutOfBounds, rarely(outOfBoundsCase), usually(holeCase));

                LBasicBlock innerLastNext = m_out.appendTo(outOfBoundsCase, holeCase);

                // Truly out of bounds: punt to the C++ slow path.
                vmCall(
                    Void, slowPathFunction,
                    weakPointer(globalObject), base, index, value);

                m_out.jump(continuation);

                m_out.appendTo(holeCase, innerLastNext);
            }

            // Within vector length: extend the public length to cover the index.
            m_out.store32(
                m_out.add(index, m_out.int32One),
                storage, m_heaps.Butterfly_publicLength);

            m_out.jump(performStore);
            m_out.appendTo(performStore, lastNext);
        }
    }
16680
    // On ARM64E, authenticates/untags a signed array pointer using its length;
    // on other CPUs this is a no-op.
    LValue untagArrayPtr(LValue ptr, LValue size)
    {
#if CPU(ARM64E)
        PatchpointValue* authenticate = m_out.patchpoint(pointerType());
        authenticate->appendSomeRegister(ptr);
        authenticate->append(size, B3::ValueRep(B3::ValueRep::SomeLateRegister));
        authenticate->numGPScratchRegisters = 1;
        authenticate->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            // params[0] = result, params[1] = ptr, params[2] = size.
            jit.move(params[1].gpr(), params[0].gpr());
            // NOTE(review): the `true` argument presumably permits a null
            // pointer — confirm against CCallHelpers::untagArrayPtr.
            jit.untagArrayPtr(params[2].gpr(), params[0].gpr(), true, params.gpScratch(0));
        });
        return authenticate;
#else
        UNUSED_PARAM(size);
        return ptr;
#endif
    }
16698
    // On ARM64E, strips the tag bits from an array pointer without
    // authenticating it; on other CPUs this is a no-op.
    LValue removeArrayPtrTag(LValue ptr)
    {
#if CPU(ARM64E)
        PatchpointValue* authenticate = m_out.patchpoint(pointerType());
        authenticate->appendSomeRegister(ptr);
        authenticate->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            jit.move(params[1].gpr(), params[0].gpr());
            jit.removeArrayPtrTag(params[0].gpr());
        });
        return authenticate;
#endif
        // Only reached when not ARM64E.
        return ptr;
    }
16712
    // Constrains an untrusted pointer to the gigacage for `kind` (when the
    // gigacage is enabled) by masking it and rebasing it onto the cage's base
    // pointer. On ARM64E the original pointer's top (PAC) bits are preserved
    // across the rebase and the pointer is then untagged. `base` is the owning
    // JSArrayBufferView, used to fetch the length needed for untagging.
    LValue caged(Gigacage::Kind kind, LValue ptr, LValue base)
    {
        auto doUntagArrayPtr = [&](LValue taggedPtr) {
#if CPU(ARM64E)
            if (kind == Gigacage::Primitive) {
                LValue size = m_out.load32(base, m_heaps.JSArrayBufferView_length);
                return untagArrayPtr(taggedPtr, size);
            }
            return ptr;
#else
            UNUSED_PARAM(taggedPtr);
            return ptr;
#endif
        };

#if GIGACAGE_ENABLED
        if (!Gigacage::isEnabled(kind))
            return doUntagArrayPtr(ptr);

        // The primitive gigacage can be disabled at runtime. While that is
        // still possible, watch for it; once the watchpoint has fired, skip
        // caging entirely.
        if (kind == Gigacage::Primitive && !Gigacage::disablingPrimitiveGigacageIsForbidden()) {
            if (vm().primitiveGigacageEnabled().isStillValid())
                m_graph.watchpoints().addLazily(vm().primitiveGigacageEnabled());
            else
                return doUntagArrayPtr(ptr);
        }

        LValue basePtr = m_out.constIntPtr(Gigacage::basePtr(kind));
        LValue mask = m_out.constIntPtr(Gigacage::mask(kind));

        LValue masked = m_out.bitAnd(ptr, mask);
        LValue result = m_out.add(masked, basePtr);
#if CPU(ARM64E)
        // The sentinel null vector pointer must pass through caging unchanged.
        result = m_out.select(
            m_out.equal(ptr, m_out.constIntPtr(JSArrayBufferView::nullVectorPtr())),
            ptr, result);
#endif

#if CPU(ARM64E)
        if (kind == Gigacage::Primitive) {
            // Copy the original pointer (keeping its top PAC bits) and insert
            // the caged low bits into it, then untag the merged pointer.
            PatchpointValue* merge = m_out.patchpoint(pointerType());
            merge->append(result, B3::ValueRep(B3::ValueRep::SomeLateRegister));
            merge->appendSomeRegister(ptr);
            merge->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                jit.move(params[2].gpr(), params[0].gpr());
                jit.insertBitField64(params[1].gpr(), CCallHelpers::TrustedImm32(0), CCallHelpers::TrustedImm32(64 - MacroAssembler::maxNumberOfAllowedPACBits), params[0].gpr());
            });

            result = doUntagArrayPtr(merge);
        }
#endif // CPU(ARM64E)

        // Make sure that B3 doesn't try to do smart reassociation of these pointer bits.
        // FIXME: In an ideal world, B3 would not do harmful reassociations, and if it did, it would be able
        // to undo them during constant hoisting and regalloc. As it stands, if you remove this then Octane
        // gets 1.6% slower and Kraken gets 5% slower. It's all because the basePtr, which is a constant,
        // gets reassociated out of the add above and into the address arithmetic. This disables hoisting of
        // the basePtr constant. Hoisting that constant is worth a lot more perf than the reassociation. One
        // way to make this all work happily is to combine offset legalization with constant hoisting, and
        // then teach it reassociation. So, Add(Add(a, b), const) where a is loop-invariant while b isn't
        // will turn into Add(Add(a, const), b) by the constant hoister. We would have to teach B3 to do this
        // and possibly other smart things if we want to be able to remove this opaque.
        // https://bugs.webkit.org/show_bug.cgi?id=175493
        return m_out.opaque(result);
#endif

        UNUSED_PARAM(kind);
        UNUSED_PARAM(base);
        return doUntagArrayPtr(ptr);
    }
16782
16783 void buildSwitch(SwitchData* data, LType type, LValue switchValue)
16784 {
16785 ASSERT(type == pointerType() || type == Int32);
16786
16787 Vector<SwitchCase> cases;
16788 for (unsigned i = 0; i < data->cases.size(); ++i) {
16789 SwitchCase newCase;
16790
16791 if (type == pointerType()) {
16792 newCase = SwitchCase(m_out.constIntPtr(data->cases[i].value.switchLookupValue(data->kind)),
16793 lowBlock(data->cases[i].target.block), Weight(data->cases[i].target.count));
16794 } else if (type == Int32) {
16795 newCase = SwitchCase(m_out.constInt32(data->cases[i].value.switchLookupValue(data->kind)),
16796 lowBlock(data->cases[i].target.block), Weight(data->cases[i].target.count));
16797 } else
16798 CRASH();
16799
16800 cases.append(newCase);
16801 }
16802
16803 m_out.switchInstruction(
16804 switchValue, cases,
16805 lowBlock(data->fallThrough.block), Weight(data->fallThrough.count));
16806 }
16807
16808 void switchString(SwitchData* data, LValue string, Edge& edge)
16809 {
16810 bool canDoBinarySwitch = true;
16811 unsigned totalLength = 0;
16812
16813 for (DFG::SwitchCase myCase : data->cases) {
16814 StringImpl* string = myCase.value.stringImpl();
16815 if (!string->is8Bit()) {
16816 canDoBinarySwitch = false;
16817 break;
16818 }
16819 if (string->length() > Options::maximumBinaryStringSwitchCaseLength()) {
16820 canDoBinarySwitch = false;
16821 break;
16822 }
16823 totalLength += string->length();
16824 }
16825
16826 if (!canDoBinarySwitch || totalLength > Options::maximumBinaryStringSwitchTotalLength()) {
16827 switchStringSlow(data, string);
16828 return;
16829 }
16830
16831 LBasicBlock hasImplBlock = m_out.newBlock();
16832 LBasicBlock is8BitBlock = m_out.newBlock();
16833 LBasicBlock slowBlock = m_out.newBlock();
16834
16835 m_out.branch(isRopeString(string, edge), unsure(slowBlock), unsure(hasImplBlock));
16836
16837 LBasicBlock lastNext = m_out.appendTo(hasImplBlock, is8BitBlock);
16838
16839 LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
16840 LValue length = m_out.load32(stringImpl, m_heaps.StringImpl_length);
16841
16842 m_out.branch(
16843 m_out.testIsZero32(
16844 m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
16845 m_out.constInt32(StringImpl::flagIs8Bit())),
16846 unsure(slowBlock), unsure(is8BitBlock));
16847
16848 m_out.appendTo(is8BitBlock, slowBlock);
16849
16850 LValue buffer = m_out.loadPtr(stringImpl, m_heaps.StringImpl_data);
16851
16852 // FIXME: We should propagate branch weight data to the cases of this switch.
16853 // https://bugs.webkit.org/show_bug.cgi?id=144368
16854
16855 Vector<StringSwitchCase> cases;
16856 for (DFG::SwitchCase myCase : data->cases)
16857 cases.append(StringSwitchCase(myCase.value.stringImpl(), lowBlock(myCase.target.block)));
16858 std::sort(cases.begin(), cases.end());
16859 switchStringRecurse(data, buffer, length, cases, 0, 0, cases.size(), 0, false);
16860
16861 m_out.appendTo(slowBlock, lastNext);
16862 switchStringSlow(data, string);
16863 }
16864
16865 // The code for string switching is based closely on the same code in the DFG backend. While it
16866 // would be nice to reduce the amount of similar-looking code, it seems like this is one of
16867 // those algorithms where factoring out the common bits would result in more code than just
16868 // duplicating.
16869
16870 struct StringSwitchCase {
16871 StringSwitchCase() { }
16872
16873 StringSwitchCase(StringImpl* string, LBasicBlock target)
16874 : string(string)
16875 , target(target)
16876 {
16877 }
16878
16879 bool operator<(const StringSwitchCase& other) const
16880 {
16881 return stringLessThan(*string, *other.string);
16882 }
16883
16884 StringImpl* string;
16885 LBasicBlock target;
16886 };
16887
16888 struct CharacterCase {
16889 CharacterCase()
16890 : character(0)
16891 , begin(0)
16892 , end(0)
16893 {
16894 }
16895
16896 CharacterCase(LChar character, unsigned begin, unsigned end)
16897 : character(character)
16898 , begin(begin)
16899 , end(end)
16900 {
16901 }
16902
16903 bool operator<(const CharacterCase& other) const
16904 {
16905 return character < other.character;
16906 }
16907
16908 LChar character;
16909 unsigned begin;
16910 unsigned end;
16911 };
16912
    // Recursively emits a binary character trie over the sorted cases in
    // [begin, end). The first numChecked characters are already known to match
    // cases[begin]; alreadyCheckedLength and checkedExactLength describe what
    // is already known about the subject's length. Any mismatch jumps to the
    // switch's fallThrough block.
    void switchStringRecurse(
        SwitchData* data, LValue buffer, LValue length, const Vector<StringSwitchCase>& cases,
        unsigned numChecked, unsigned begin, unsigned end, unsigned alreadyCheckedLength,
        unsigned checkedExactLength)
    {
        LBasicBlock fallThrough = lowBlock(data->fallThrough.block);

        if (begin == end) {
            m_out.jump(fallThrough);
            return;
        }

        // Compute the longest common prefix (beyond numChecked) and the minimum
        // length across cases[begin, end).
        unsigned minLength = cases[begin].string->length();
        unsigned commonChars = minLength;
        bool allLengthsEqual = true;
        for (unsigned i = begin + 1; i < end; ++i) {
            unsigned myCommonChars = numChecked;
            unsigned limit = std::min(cases[begin].string->length(), cases[i].string->length());
            for (unsigned j = numChecked; j < limit; ++j) {
                if (cases[begin].string->at(j) != cases[i].string->at(j))
                    break;
                myCommonChars++;
            }
            commonChars = std::min(commonChars, myCommonChars);
            if (minLength != cases[i].string->length())
                allLengthsEqual = false;
            minLength = std::min(minLength, cases[i].string->length());
        }

        if (checkedExactLength) {
            DFG_ASSERT(m_graph, m_node, alreadyCheckedLength == minLength, alreadyCheckedLength, minLength);
            DFG_ASSERT(m_graph, m_node, allLengthsEqual);
        }

        DFG_ASSERT(m_graph, m_node, minLength >= commonChars, minLength, commonChars);

        // Emit whatever length checks are not already implied by the callers.
        if (!allLengthsEqual && alreadyCheckedLength < minLength)
            m_out.check(m_out.below(length, m_out.constInt32(minLength)), unsure(fallThrough));
        if (allLengthsEqual && (alreadyCheckedLength < minLength || !checkedExactLength))
            m_out.check(m_out.notEqual(length, m_out.constInt32(minLength)), unsure(fallThrough));

        // Verify the shared prefix character by character.
        for (unsigned i = numChecked; i < commonChars; ++i) {
            m_out.check(
                m_out.notEqual(
                    m_out.load8ZeroExt32(buffer, m_heaps.characters8[i]),
                    m_out.constInt32(static_cast<uint16_t>(cases[begin].string->at(i)))),
                unsure(fallThrough));
        }

        if (minLength == commonChars) {
            // This is the case where one of the cases is a prefix of all of the other cases.
            // We've already checked that the input string is a prefix of all of the cases,
            // so we just check length to jump to that case.

            DFG_ASSERT(m_graph, m_node, cases[begin].string->length() == commonChars, cases[begin].string->length(), commonChars);
            for (unsigned i = begin + 1; i < end; ++i)
                DFG_ASSERT(m_graph, m_node, cases[i].string->length() > commonChars, cases[i].string->length(), commonChars);

            if (allLengthsEqual) {
                DFG_ASSERT(m_graph, m_node, end == begin + 1, end, begin);
                m_out.jump(cases[begin].target);
                return;
            }

            m_out.check(
                m_out.equal(length, m_out.constInt32(commonChars)),
                unsure(cases[begin].target));

            // We've checked if the length is >= minLength, and then we checked if the length is
            // == commonChars. We get to this point if it is >= minLength but not == commonChars.
            // Hence we know that it now must be > minLength, i.e. that it's >= minLength + 1.
            switchStringRecurse(
                data, buffer, length, cases, commonChars, begin + 1, end, minLength + 1, false);
            return;
        }

        // At this point we know that the string is longer than commonChars, and we've only verified
        // commonChars. Use a binary switch on the next unchecked character, i.e.
        // string[commonChars].

        DFG_ASSERT(m_graph, m_node, end >= begin + 2, end, begin);

        LValue uncheckedChar = m_out.load8ZeroExt32(buffer, m_heaps.characters8[commonChars]);

        // Partition the (sorted) cases into runs sharing the same character at
        // position commonChars.
        Vector<CharacterCase> characterCases;
        CharacterCase currentCase(cases[begin].string->at(commonChars), begin, begin + 1);
        for (unsigned i = begin + 1; i < end; ++i) {
            LChar currentChar = cases[i].string->at(commonChars);
            if (currentChar != currentCase.character) {
                currentCase.end = i;
                characterCases.append(currentCase);
                currentCase = CharacterCase(currentChar, i, i + 1);
            } else
                currentCase.end = i + 1;
        }
        characterCases.append(currentCase);

        Vector<LBasicBlock> characterBlocks;
        for (unsigned i = characterCases.size(); i--;)
            characterBlocks.append(m_out.newBlock());

        Vector<SwitchCase> switchCases;
        for (unsigned i = 0; i < characterCases.size(); ++i) {
            if (i)
                DFG_ASSERT(m_graph, m_node, characterCases[i - 1].character < characterCases[i].character);
            switchCases.append(SwitchCase(
                m_out.constInt32(characterCases[i].character), characterBlocks[i], Weight()));
        }
        m_out.switchInstruction(uncheckedChar, switchCases, fallThrough, Weight());

        // Recurse into each character run.
        LBasicBlock lastNext = m_out.m_nextBlock;
        characterBlocks.append(lastNext); // Makes it convenient to set nextBlock.
        for (unsigned i = 0; i < characterCases.size(); ++i) {
            m_out.appendTo(characterBlocks[i], characterBlocks[i + 1]);
            switchStringRecurse(
                data, buffer, length, cases, commonChars + 1,
                characterCases[i].begin, characterCases[i].end, minLength, allLengthsEqual);
        }

        DFG_ASSERT(m_graph, m_node, m_out.m_nextBlock == lastNext);
    }
17034
    void switchStringSlow(SwitchData* data, LValue string)
    {
        // Fallback lowering for a string switch: call the runtime to resolve the
        // input string against the bytecode's unlinked string jump table, then emit
        // a dense B3 Switch over the resulting table indices.
        //
        // FIXME: We ought to be able to use computed gotos here. We would save the labels of the
        // blocks we want to jump to, and then request their addresses after compilation completes.
        // https://bugs.webkit.org/show_bug.cgi?id=144369

        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

        const UnlinkedStringJumpTable& unlinkedTable = m_graph.unlinkedStringSwitchJumpTable(data->switchTableIndex);

        // The call returns the matched entry's index in the table (or the
        // fall-through value if no entry matches).
        LValue branchIndex = vmCall(
            Int32, operationSwitchStringAndGetIndex,
            weakPointer(globalObject), m_out.constIntPtr(&unlinkedTable), string);

        Vector<SwitchCase> cases;
        // These may be negative, or zero, or probably other stuff, too. We don't want to mess with HashSet's corner cases and we don't really care about throughput here.
        StdUnorderedSet<int32_t> alreadyHandled;
        for (unsigned i = 0; i < data->cases.size(); ++i) {
            // FIXME: The fact that we're using the bytecode's switch table means that the
            // following DFG IR transformation would be invalid.
            //
            // Original code:
            //     switch (v) {
            //     case "foo":
            //     case "bar":
            //         things();
            //         break;
            //     default:
            //         break;
            //     }
            //
            // New code:
            //     switch (v) {
            //     case "foo":
            //         instrumentFoo();
            //         goto _things;
            //     case "bar":
            //         instrumentBar();
            //     _things:
            //         things();
            //         break;
            //     default:
            //         break;
            //     }
            //
            // Luckily, we don't currently do any such transformation. But it's kind of silly that
            // this is an issue.
            // https://bugs.webkit.org/show_bug.cgi?id=144635

            DFG::SwitchCase myCase = data->cases[i];
            auto iter = unlinkedTable.m_offsetTable.find(myCase.value.stringImpl());
            DFG_ASSERT(m_graph, m_node, iter != unlinkedTable.m_offsetTable.end());

            // Use m_indexInTable instead of m_branchOffset to make Switch table dense.
            // Several DFG cases can share one table entry; only emit the first.
            if (!alreadyHandled.insert(iter->value.m_indexInTable).second)
                continue;

            cases.append(SwitchCase(
                m_out.constInt32(iter->value.m_indexInTable),
                lowBlock(myCase.target.block), Weight(myCase.target.count)));
        }

        m_out.switchInstruction(
            branchIndex, cases, lowBlock(data->fallThrough.block),
            Weight(data->fallThrough.count));
    }
17101
17102 // Calls the functor at the point of code generation where we know what the result type is.
17103 // You can emit whatever code you like at that point. Expects you to terminate the basic block.
17104 // When buildTypeOf() returns, it will have terminated all basic blocks that it created. So, if
17105 // you aren't using this as the terminator of a high-level block, you should create your own
    // continuation and set it as the nextBlock (m_out.insertNewBlocksBefore(continuation)) before
17107 // calling this. For example:
17108 //
17109 // LBasicBlock continuation = m_out.newBlock();
17110 // LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
17111 // buildTypeOf(
17112 // child, value,
17113 // [&] (TypeofType type) {
17114 // do things;
17115 // m_out.jump(continuation);
17116 // });
17117 // m_out.appendTo(continuation, lastNext);
    template<typename Functor>
    void buildTypeOf(Edge child, LValue value, const Functor& functor)
    {
        // Emits the full typeof dispatch tree for |value|. For each reachable
        // TypeofType outcome, |functor| is invoked with the corresponding basic
        // block current; the functor must terminate that block (see the usage
        // comment above this function).
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);

        // Implements the following branching structure:
        //
        // if (is cell) {
        //     if (is object) {
        //         if (is function) {
        //             return function;
        //         } else if (doesn't have call trap and doesn't masquerade as undefined) {
        //             return object
        //         } else {
        //             return slowPath();
        //         }
        //     } else if (is string) {
        //         return string
        //     } else if (is heapbigint) {
        //         return bigint
        //     } else {
        //         return symbol
        //     }
        // } else if (is number) {
        //     return number
        // } else if (is bigint32) {
        //     return bigint
        // } else if (is null) {
        //     return object
        // } else if (is boolean) {
        //     return boolean
        // } else {
        //     return undefined
        // }
        //
        // FIXME: typeof Symbol should be more frequently seen than BigInt.
        // We should change the order of type detection based on this frequency.
        // https://bugs.webkit.org/show_bug.cgi?id=192650

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock objectCase = m_out.newBlock();
        LBasicBlock functionCase = m_out.newBlock();
        LBasicBlock notFunctionCase = m_out.newBlock();
        LBasicBlock reallyObjectCase = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock unreachable = m_out.newBlock();
        LBasicBlock notObjectCase = m_out.newBlock();
        LBasicBlock stringCase = m_out.newBlock();
        LBasicBlock notStringCase = m_out.newBlock();
        LBasicBlock bigIntCase = m_out.newBlock();
        LBasicBlock symbolCase = m_out.newBlock();
        LBasicBlock notCellCase = m_out.newBlock();
        LBasicBlock numberCase = m_out.newBlock();
        LBasicBlock notNumberCase = m_out.newBlock();
#if USE(BIGINT32)
        LBasicBlock notBigInt32Case = m_out.newBlock();
#endif
        LBasicBlock notNullCase = m_out.newBlock();
        LBasicBlock booleanCase = m_out.newBlock();
        LBasicBlock undefinedCase = m_out.newBlock();

        m_out.branch(isCell(value, provenType(child)), unsure(cellCase), unsure(notCellCase));

        LBasicBlock lastNext = m_out.appendTo(cellCase, objectCase);
        m_out.branch(isObject(value, provenType(child)), unsure(objectCase), unsure(notObjectCase));

        m_out.appendTo(objectCase, functionCase);
        m_out.branch(
            isFunction(value, provenType(child) & SpecObject),
            unsure(functionCase), unsure(notFunctionCase));

        m_out.appendTo(functionCase, notFunctionCase);
        functor(TypeofType::Function);

        // Objects with a call trap or that masquerade as undefined need the slow
        // path to decide between "object", "function", and "undefined".
        m_out.appendTo(notFunctionCase, reallyObjectCase);
        m_out.branch(
            isExoticForTypeof(value, provenType(child) & (SpecObject - SpecFunction)),
            rarely(slowPath), usually(reallyObjectCase));

        m_out.appendTo(reallyObjectCase, slowPath);
        functor(TypeofType::Object);

        m_out.appendTo(slowPath, unreachable);
        VM& vm = this->vm();
        LValue result = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationTypeOfObjectAsTypeofType, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
            }, value);
        // Re-dispatch on the TypeofType the runtime computed, jumping back into
        // the corresponding case block above.
        Vector<SwitchCase, 3> cases;
        cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Undefined)), undefinedCase));
        cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Object)), reallyObjectCase));
        cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Function)), functionCase));
        m_out.switchInstruction(m_out.castToInt32(result), cases, unreachable, Weight());

        m_out.appendTo(unreachable, notObjectCase);
        m_out.unreachable();

        m_out.appendTo(notObjectCase, stringCase);
        m_out.branch(
            isString(value, provenType(child) & (SpecCell - SpecObject)),
            unsure(stringCase), unsure(notStringCase));

        m_out.appendTo(stringCase, notStringCase);
        functor(TypeofType::String);

        m_out.appendTo(notStringCase, bigIntCase);
        m_out.branch(
            isHeapBigInt(value, provenType(child) & (SpecCell - SpecObject - SpecString)),
            unsure(bigIntCase), unsure(symbolCase));

        m_out.appendTo(bigIntCase, symbolCase);
        functor(TypeofType::BigInt);

        m_out.appendTo(symbolCase, notCellCase);
        functor(TypeofType::Symbol);

        m_out.appendTo(notCellCase, numberCase);
        m_out.branch(
            isNumber(value, provenType(child) & ~SpecCell),
            unsure(numberCase), unsure(notNumberCase));

        m_out.appendTo(numberCase, notNumberCase);
        functor(TypeofType::Number);

#if USE(BIGINT32)
        m_out.appendTo(notNumberCase, notBigInt32Case);
        m_out.branch(isBigInt32(value, provenType(child) & ~SpecCell), unsure(bigIntCase), unsure(notBigInt32Case));

        m_out.appendTo(notBigInt32Case, notNullCase);
#else
        m_out.appendTo(notNumberCase, notNullCase);
#endif
        // typeof null is "object". Only emit the comparison if null is possible.
        LValue isNull;
        if (provenType(child) & SpecOther)
            isNull = m_out.equal(value, m_out.constInt64(JSValue::ValueNull));
        else
            isNull = m_out.booleanFalse;
        m_out.branch(isNull, unsure(reallyObjectCase), unsure(notNullCase));

        m_out.appendTo(notNullCase, booleanCase);
        m_out.branch(
            isBoolean(value, provenType(child) & ~(SpecCell | SpecFullNumber)),
            unsure(booleanCase), unsure(undefinedCase));

        m_out.appendTo(booleanCase, undefinedCase);
        functor(TypeofType::Boolean);

        m_out.appendTo(undefinedCase, lastNext);
        functor(TypeofType::Undefined);
    }
17270
17271 TypedPointer pointerIntoTypedArray(LValue storage, LValue index, TypedArrayType type)
17272 {
17273 LValue offset = m_out.shl(m_out.zeroExtPtr(index), m_out.constIntPtr(logElementSize(type)));
17274
17275 return TypedPointer(
17276 m_heaps.typedArrayProperties,
17277 m_out.add(
17278 storage,
17279 offset
17280 ));
17281 }
17282
17283 LValue loadFromIntTypedArray(TypedPointer pointer, TypedArrayType type)
17284 {
17285 switch (elementSize(type)) {
17286 case 1:
17287 return isSigned(type) ? m_out.load8SignExt32(pointer) : m_out.load8ZeroExt32(pointer);
17288 case 2:
17289 return isSigned(type) ? m_out.load16SignExt32(pointer) : m_out.load16ZeroExt32(pointer);
17290 case 4:
17291 return m_out.load32(pointer);
17292 default:
17293 DFG_CRASH(m_graph, m_node, "Bad element size");
17294 }
17295 }
17296
17297 Output::StoreType storeType(TypedArrayType type)
17298 {
17299 if (isInt(type)) {
17300 switch (elementSize(type)) {
17301 case 1:
17302 return Output::Store32As8;
17303 case 2:
17304 return Output::Store32As16;
17305 case 4:
17306 return Output::Store32;
17307 default:
17308 DFG_CRASH(m_graph, m_node, "Bad element size");
17309 return Output::Store32;
17310 }
17311 }
17312 switch (type) {
17313 case TypeFloat32:
17314 return Output::StoreFloat;
17315 case TypeFloat64:
17316 return Output::StoreDouble;
17317 default:
17318 DFG_CRASH(m_graph, m_node, "Bad typed array type");
17319 }
17320 }
17321
    void setIntTypedArrayLoadResult(LValue result, TypedArrayType type, bool canSpeculate)
    {
        // Records the node's result for an int typed-array load. Narrow (8/16-bit)
        // and signed 32-bit loads always fit in int32.
        if (elementSize(type) < 4 || isSigned(type)) {
            setInt32(result);
            return;
        }

        // Uint32 load: the value may not fit in a signed int32.
        if (m_node->shouldSpeculateInt32() && canSpeculate) {
            // Speculate that the high bit is clear; OSR exit on Overflow otherwise.
            speculate(
                Overflow, noValue(), nullptr, m_out.lessThan(result, m_out.int32Zero));
            setInt32(result);
            return;
        }

        if (m_node->shouldSpeculateInt52()) {
            // Any uint32 fits losslessly after zero-extension to 64-bit.
            setStrictInt52(m_out.zeroExt(result, Int64));
            return;
        }

        // Otherwise represent the unsigned value exactly as a double.
        setDouble(m_out.unsignedToDouble(result));
    }
17343
    LValue getIntTypedArrayStoreOperand(Edge edge, bool isClamped = false)
    {
        // Produces the int32 value to store into an int typed array for |edge|.
        // When isClamped is set (Uint8Clamped arrays), the value is clamped to
        // [0, 255] instead of being wrapped.
        LValue intValue;
        switch (edge.useKind()) {
        case Int52RepUse:
        case Int32Use: {
            if (edge.useKind() == Int32Use)
                intValue = lowInt32(edge);
            else
                intValue = m_out.castToInt32(lowStrictInt52(edge));

            if (isClamped) {
                LBasicBlock atLeastZero = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                Vector<ValueFromBlock, 2> intValues;
                // Negative values clamp to zero.
                intValues.append(m_out.anchor(m_out.int32Zero));
                m_out.branch(
                    m_out.lessThan(intValue, m_out.int32Zero),
                    unsure(continuation), unsure(atLeastZero));

                LBasicBlock lastNext = m_out.appendTo(atLeastZero, continuation);

                // Non-negative values clamp to 255 from above.
                intValues.append(m_out.anchor(m_out.select(
                    m_out.greaterThan(intValue, m_out.constInt32(255)),
                    m_out.constInt32(255),
                    intValue)));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                intValue = m_out.phi(Int32, intValues);
            }
            break;
        }

        case DoubleRepUse: {
            LValue doubleValue = lowDouble(edge);

            if (isClamped) {
                LBasicBlock atLeastZero = m_out.newBlock();
                LBasicBlock withinRange = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                Vector<ValueFromBlock, 3> intValues;
                // NaN and negative doubles clamp to zero; the "unordered"
                // comparison catches NaN here.
                intValues.append(m_out.anchor(m_out.int32Zero));
                m_out.branch(
                    m_out.doubleLessThanOrUnordered(doubleValue, m_out.doubleZero),
                    unsure(continuation), unsure(atLeastZero));

                // Values above 255 clamp to 255.
                LBasicBlock lastNext = m_out.appendTo(atLeastZero, withinRange);
                intValues.append(m_out.anchor(m_out.constInt32(255)));
                m_out.branch(
                    m_out.doubleGreaterThan(doubleValue, m_out.constDouble(255)),
                    unsure(continuation), unsure(withinRange));

                m_out.appendTo(withinRange, continuation);
                intValues.append(m_out.anchor(m_out.doubleToInt(doubleValue)));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                intValue = m_out.phi(Int32, intValues);
            } else
                intValue = doubleToInt32(doubleValue);
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
        }

        return intValue;
    }
17416
    LValue doubleToInt32(LValue doubleValue, double low, double high, bool isSigned = true)
    {
        // Converts a double to int32 with a fast truncating path for values in
        // [low, high]. Anything outside that range (including NaN, which fails
        // both ordered comparisons) takes the slow call to operationToInt32.
        LBasicBlock greatEnough = m_out.newBlock();
        LBasicBlock withinRange = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        Vector<ValueFromBlock, 2> results;

        m_out.branch(
            m_out.doubleGreaterThanOrEqual(doubleValue, m_out.constDouble(low)),
            unsure(greatEnough), unsure(slowPath));

        LBasicBlock lastNext = m_out.appendTo(greatEnough, withinRange);
        m_out.branch(
            m_out.doubleLessThanOrEqual(doubleValue, m_out.constDouble(high)),
            unsure(withinRange), unsure(slowPath));

        // In range: a plain (signed or unsigned) truncating conversion is exact.
        m_out.appendTo(withinRange, slowPath);
        LValue fastResult;
        if (isSigned)
            fastResult = m_out.doubleToInt(doubleValue);
        else
            fastResult = m_out.doubleToUInt(doubleValue);
        results.append(m_out.anchor(fastResult));
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        results.append(m_out.anchor(m_out.castToInt32(m_out.callWithoutSideEffects(Int64, operationToInt32, doubleValue))));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, results);
    }
17451
    LValue doubleToInt32(LValue doubleValue)
    {
#if CPU(ARM64)
        // ARM64 can do the JS-semantics double-to-int32 conversion in a single
        // instruction, so emit it directly via a patchpoint.
        if (MacroAssemblerARM64::supportsDoubleToInt32ConversionUsingJavaScriptSemantics()) {
            PatchpointValue* patchpoint = m_out.patchpoint(Int32);
            patchpoint->append(ConstrainedValue(doubleValue, B3::ValueRep::SomeRegister));
            patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                jit.convertDoubleToInt32UsingJavaScriptSemantics(params[1].fpr(), params[0].gpr());
            });
            // Pure conversion: no memory effects, so B3 may freely move/CSE it.
            patchpoint->effects = Effects::none();
            return patchpoint;
        }
#endif

        if (hasSensibleDoubleToInt())
            return sensibleDoubleToInt32(doubleValue);

        // Generic fallback: fast path only for doubles whose truncation is known
        // to fit in int32; everything else goes through the range-checked helper.
        double limit = pow(2, 31) - 1;
        return doubleToInt32(doubleValue, -limit, limit);
    }
17472
    LValue sensibleDoubleToInt32(LValue doubleValue)
    {
        // Optimistically truncate, then fall back to the slow runtime call only if
        // the hardware conversion produced 0x80000000 — the value it yields for
        // out-of-range inputs (which 0x80000000 itself also legitimately maps to,
        // so the slow path recomputes it correctly either way).
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue fastResultValue = m_out.doubleToInt(doubleValue);
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);
        m_out.branch(
            m_out.equal(fastResultValue, m_out.constInt32(0x80000000)),
            rarely(slowPath), usually(continuation));

        LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
        ValueFromBlock slowResult = m_out.anchor(m_out.castToInt32(m_out.callWithoutSideEffects(Int64, operationToInt32SensibleSlow, doubleValue)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, fastResult, slowResult);
    }
17491
17492 // This is a mechanism for creating a code generator that fills in a gap in the code using our
17493 // own MacroAssembler. This is useful for slow paths that involve a lot of code and we don't want
17494 // to pay the price of B3 optimizing it. A lazy slow path will only be generated if it actually
17495 // executes. On the other hand, a lazy slow path always incurs the cost of two additional jumps.
17496 // Also, the lazy slow path's register allocation state is slaved to whatever B3 did, so you
17497 // have to use a ScratchRegisterAllocator to try to use some unused registers and you may have
17498 // to spill to top of stack if there aren't enough registers available.
17499 //
17500 // Lazy slow paths involve three different stages of execution. Each stage has unique
17501 // capabilities and knowledge. The stages are:
17502 //
17503 // 1) DFG->B3 lowering, i.e. code that runs in this phase. Lowering is the last time you will
17504 // have access to LValues. If there is an LValue that needs to be fed as input to a lazy slow
17505 // path, then you must pass it as an argument here (as one of the varargs arguments after the
17506 // functor). But, lowering doesn't know which registers will be used for those LValues. Hence
17507 // you pass a lambda to lazySlowPath() and that lambda will run during stage (2):
17508 //
17509 // 2) FTLCompile.cpp's fixFunctionBasedOnStackMaps. This code is the only stage at which we know
17510 // the mapping from arguments passed to this method in (1) and the registers that B3
17511 // selected for those arguments. You don't actually want to generate any code here, since then
17512 // the slow path wouldn't actually be lazily generated. Instead, you want to save the
17513 // registers being used for the arguments and defer code generation to stage (3) by creating
17514 // and returning a LazySlowPath::Generator:
17515 //
17516 // 3) LazySlowPath's generate() method. This code runs in response to the lazy slow path
17517 // executing for the first time. It will call the generator you created in stage (2).
17518 //
17519 // Note that each time you invoke stage (1), stage (2) may be invoked zero, one, or many times.
17520 // Stage (2) will usually be invoked once for stage (1). But, B3 may kill the code, in which
17521 // case stage (2) won't run. B3 may duplicate the code (for example via tail duplication),
17522 // leading to many calls to your stage (2) lambda. Stage (3) may be called zero or once for each
17523 // stage (2). It will be called zero times if the slow path never runs. This is what you hope for
17524 // whenever you use the lazySlowPath() mechanism.
17525 //
17526 // A typical use of lazySlowPath() will look like the example below, which just creates a slow
17527 // path that adds some value to the input and returns it.
17528 //
17529 // // Stage (1) is here. This is your last chance to figure out which LValues to use as inputs.
17530 // // Notice how we pass "input" as an argument to lazySlowPath().
17531 // LValue input = ...;
17532 // int addend = ...;
17533 // LValue output = lazySlowPath(
17534 // [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
17535 // // Stage (2) is here. This is your last chance to figure out which registers are used
17536 // // for which values. Location zero is always the return value. You can ignore it if
17537 // // you don't want to return anything. Location 1 is the register for the first
17538 // // argument to the lazySlowPath(), i.e. "input". Note that the Location object could
17539 // // also hold an FPR, if you are passing a double.
17540 // GPRReg outputGPR = locations[0].directGPR();
17541 // GPRReg inputGPR = locations[1].directGPR();
17542 // return LazySlowPath::createGenerator(
17543 // [=] (CCallHelpers& jit, LazySlowPath::GenerationParams& params) {
17544 // // Stage (3) is here. This is when you generate code. You have access to the
17545 // // registers you collected in stage (2) because this lambda closes over those
17546 // // variables (outputGPR and inputGPR). You also have access to whatever extra
17547 // // data you collected in stage (1), such as the addend in this case.
17548 // jit.add32(TrustedImm32(addend), inputGPR, outputGPR);
17549 // // You have to end by jumping to done. There is nothing to fall through to.
17550 // // You can also jump to the exception handler (see LazySlowPath.h for more
17551 // // info). Note that currently you cannot OSR exit.
17552 // params.doneJumps.append(jit.jump());
17553 // });
17554 // },
17555 // input);
17556 //
17557 // You can basically pass as many inputs as you like, either using this varargs form, or by
17558 // passing a Vector of LValues.
17559 //
17560 // Note that if your slow path is only doing a call, you can use the createLazyCallGenerator()
17561 // helper. For example:
17562 //
17563 // LValue input = ...;
17564 // LValue output = lazySlowPath(
17565 // [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
17566 // return createLazyCallGenerator(
17567 // operationDoThings, locations[0].directGPR(), locations[1].directGPR());
17568 // }, input);
17569 //
17570 // Finally, note that all of the lambdas - both the stage (2) lambda and the stage (3) lambda -
17571 // run after the function that created them returns. Hence, you should not use by-reference
17572 // capture (i.e. [&]) in any of these lambdas.
17573 template<typename Functor, typename... ArgumentTypes>
17574 PatchpointValue* lazySlowPath(const Functor& functor, ArgumentTypes... arguments)
17575 {
17576 return lazySlowPath(functor, Vector<LValue>{ arguments... });
17577 }
17578
    template<typename Functor>
    PatchpointValue* lazySlowPath(const Functor& functor, const Vector<LValue>& userArguments)
    {
        // See the big comment above for the three-stage protocol. In the emitted
        // main-path code, the slow path is just a patchable jump followed by a
        // "done" label; the slow path body itself is generated lazily, the first
        // time the jump is taken, via the lazy-slow-path generation thunk.
        CodeOrigin origin = m_origin.semantic;

        PatchpointValue* result = m_out.patchpoint(B3::Int64);
        for (LValue arg : userArguments)
            result->append(ConstrainedValue(arg, B3::ValueRep::SomeRegister));

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(result);

        result->clobber(RegisterSet::macroScratchRegisters());
        State* state = &m_ftlState;

        result->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                // Stage (2): B3 has chosen registers; capture them as Locations
                // (locations[0] is the result, then one per user argument).
                Vector<Location> locations;
                for (const B3::ValueRep& rep : params)
                    locations.append(Location::forValueRep(rep));

                RefPtr<LazySlowPath::Generator> generator = functor(locations);

                // The patchable jump initially targets the generation thunk; once
                // the slow path is generated it is repatched to point at it. Slow
                // path code returns to |done|.
                CCallHelpers::PatchableJump patchableJump = jit.patchableJump();
                CCallHelpers::Label done = jit.label();

                RegisterSet usedRegisters = params.unavailableRegisters();

                RefPtr<ExceptionTarget> exceptionTarget =
                    exceptionHandle->scheduleExitCreation(params);

                // FIXME: As part of handling exceptions, we need to create a concrete OSRExit here.
                // Doing so should automagically register late paths that emit exit thunks.

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);
                        patchableJump.m_jump.link(&jit);
                        // Reserve a slot for this lazy slow path now; its index is
                        // pushed on the stack so the generation thunk can find it.
                        unsigned index = state->jitCode->lazySlowPaths.size();
                        state->jitCode->lazySlowPaths.append(nullptr);
                        jit.pushToSaveImmediateWithoutTouchingRegisters(
                            CCallHelpers::TrustedImm32(index));
                        CCallHelpers::Jump generatorJump = jit.jump();

                        // Note that so long as we're here, we don't really know if our late path
                        // runs before or after any other late paths that we might depend on, like
                        // the exception thunk.

                        RefPtr<JITCode> jitCode = state->jitCode;
                        VM* vm = &state->graph.m_vm;

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                linkBuffer.link(generatorJump,
                                    CodeLocationLabel<JITThunkPtrTag>(vm->getCTIStub(lazySlowPathGenerationThunkGenerator).code()));

                                std::unique_ptr<LazySlowPath> lazySlowPath = makeUnique<LazySlowPath>();

                                auto linkedPatchableJump = CodeLocationJump<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(patchableJump));

                                CodeLocationLabel<JSInternalPtrTag> linkedDone = linkBuffer.locationOf<JSInternalPtrTag>(done);

                                CallSiteIndex callSiteIndex =
                                    jitCode->common.codeOrigins->addUniqueCallSiteIndex(origin);

                                // Fill in the reserved slot with the fully-linked
                                // LazySlowPath descriptor.
                                lazySlowPath->initialize(
                                    linkedPatchableJump, linkedDone,
                                    exceptionTarget->label(linkBuffer), usedRegisters,
                                    callSiteIndex, generator);

                                jitCode->lazySlowPaths[index] = WTFMove(lazySlowPath);
                            });
                    });
            });
        return result;
    }
17655
    // Emits an OSR exit of |kind| taken when |failCondition| is true, at the
    // current node's origin.
    void speculate(
        ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition)
    {
        appendOSRExit(kind, lowValue, highValue, failCondition, m_origin);
    }
17661
    // Overload taking a value-profile locator instead of a Node* for the exited
    // value.
    void speculate(
        ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, LValue failCondition)
    {
        appendOSRExit(kind, lowValue, profile, failCondition, m_origin);
    }
17667
    // Emits a speculation check and then an unconditional jump to |target|,
    // terminating the current block.
    template<typename... Args>
    void speculateAndJump(B3::BasicBlock* target, Args... args)
    {
        speculate(args...);
        m_out.jump(target);
    }
17674
    // Emits an unconditional OSR exit (condition is always true) and marks the
    // abstract state invalid so no further code is lowered for this block.
    void terminate(ExitKind kind)
    {
        speculate(kind, noValue(), nullptr, m_out.booleanTrue);
        didAlreadyTerminate();
    }
17680
    // Marks the abstract interpreter state invalid, signaling that the current
    // block's lowering has terminated.
    void didAlreadyTerminate()
    {
        m_state.setIsValid(false);
    }
17685
    // Narrows the abstract-interpreter type of |highValue| as if a type check had
    // been emitted, without emitting any code.
    void simulatedTypeCheck(Edge highValue, SpeculatedType typesPassedThrough)
    {
        m_interpreter.filter(highValue, typesPassedThrough);
    }
17690
    // Public entry point for emitting a type check; see appendTypeCheck below.
    void typeCheck(
        FormattedValue lowValue, Edge highValue, SpeculatedType typesPassedThrough,
        LValue failCondition, ExitKind exitKind = BadType)
    {
        appendTypeCheck(lowValue, highValue, typesPassedThrough, failCondition, exitKind);
    }
17697
    void appendTypeCheck(
        FormattedValue lowValue, Edge highValue, SpeculatedType typesPassedThrough,
        LValue failCondition, ExitKind exitKind)
    {
        // Skip the check entirely if the abstract interpreter already proves the
        // value has one of the passed-through types.
        if (!m_interpreter.needsTypeCheck(highValue, typesPassedThrough))
            return;
        ASSERT(mayHaveTypeCheck(highValue.useKind()));
        appendOSRExit(exitKind, lowValue, highValue.node(), failCondition, m_origin);
        // After the exit is in place, downstream code may assume the narrowed type.
        m_interpreter.filter(highValue, typesPassedThrough);
    }
17708
    LValue lowInt32(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        // Lowers |edge| to an Int32 LValue, consulting the per-node lowered-value
        // caches in preference order (int32, strict int52, int52, boxed JSValue)
        // and inserting conversions/type checks as needed.
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use));

        if (edge->hasConstant()) {
            JSValue value = edge->asJSValue();
            simulatedTypeCheck(edge, SpecInt32Only);
            if (!value.isInt32()) {
                // A non-int32 constant contradicts the speculation, so this code
                // is unreachable; terminate and return a dummy value.
                if (mayHaveTypeCheck(edge.useKind()))
                    terminate(Uncountable);
                return m_out.int32Zero;
            }
            LValue result = m_out.constInt32(value.asInt32());
            result->setOrigin(B3::Origin(edge.node()));
            return result;
        }

        LoweredNodeValue value = m_int32Values.get(edge.node());
        if (isValid(value)) {
            simulatedTypeCheck(edge, SpecInt32Only);
            return value.value();
        }

        value = m_strictInt52Values.get(edge.node());
        if (isValid(value))
            return strictInt52ToInt32(edge, value.value());

        value = m_int52Values.get(edge.node());
        if (isValid(value))
            return strictInt52ToInt32(edge, int52ToStrictInt52(value.value()));

        value = m_jsValueValues.get(edge.node());
        if (isValid(value)) {
            LValue boxedResult = value.value();
            FTL_TYPE_CHECK(
                jsValueValue(boxedResult), edge, SpecInt32Only, isNotInt32(boxedResult));
            LValue result = unboxInt32(boxedResult);
            // Cache the unboxed value so later uses of this node skip the check.
            setInt32(edge.node(), result);
            return result;
        }

        // No lowered representation exists: this use is unreachable.
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.int32Zero;
    }
17754
    // The two representations of Int52 values; int52ToStrictInt52() and
    // strictInt52ToInt52() convert between them.
    enum Int52Kind { StrictInt52, Int52 };
    // Lowers |edge| in the requested Int52 representation, converting from the
    // other representation if that is what's cached for the node.
    LValue lowInt52(Edge edge, Int52Kind kind)
    {
        DFG_ASSERT(m_graph, m_node, edge.useKind() == Int52RepUse, edge.useKind());

        LoweredNodeValue value;

        switch (kind) {
        case Int52:
            value = m_int52Values.get(edge.node());
            if (isValid(value))
                return value.value();

            value = m_strictInt52Values.get(edge.node());
            if (isValid(value))
                return strictInt52ToInt52(value.value());
            break;

        case StrictInt52:
            value = m_strictInt52Values.get(edge.node());
            if (isValid(value))
                return value.value();

            value = m_int52Values.get(edge.node());
            if (isValid(value))
                return int52ToStrictInt52(value.value());
            break;
        }

        // Neither representation is available: this use is unreachable.
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.int64Zero;
    }
17788
    // Shorthand for lowering in the (shifted) Int52 representation.
    LValue lowInt52(Edge edge)
    {
        return lowInt52(edge, Int52);
    }
17793
    // Shorthand for lowering in the StrictInt52 representation.
    LValue lowStrictInt52(Edge edge)
    {
        return lowInt52(edge, StrictInt52);
    }
17798
    // Prefer StrictInt52 unless the node already has a cached Int52 value (in
    // which case reusing it avoids a conversion).
    bool betterUseStrictInt52(Node* node)
    {
        return !isValid(m_int52Values.get(node));
    }
    bool betterUseStrictInt52(Edge edge)
    {
        return betterUseStrictInt52(edge.node());
    }
    // Picks the cheaper Int52 representation for |node| (see betterUseStrictInt52).
    template<typename T>
    Int52Kind bestInt52Kind(T node)
    {
        return betterUseStrictInt52(node) ? StrictInt52 : Int52;
    }
    // Returns the other Int52 representation; crashes on a corrupt enum value.
    Int52Kind opposite(Int52Kind kind)
    {
        switch (kind) {
        case Int52:
            return StrictInt52;
        case StrictInt52:
            return Int52;
        }
        DFG_CRASH(m_graph, m_node, "Bad use kind");
        return Int52;
    }
17823
    // Lowers |edge| in whichever Int52 representation is cheapest, reporting the
    // chosen representation through |kind|.
    LValue lowWhicheverInt52(Edge edge, Int52Kind& kind)
    {
        kind = bestInt52Kind(edge);
        return lowInt52(edge, kind);
    }
17829
    // Lowers |edge| to a cell pointer, emitting a not-cell type check when the
    // value was lowered as a generic JSValue. Constants are handled specially:
    // a cell constant becomes a frozen pointer, a non-cell constant terminates
    // the block. Returns an intPtr zero placeholder on termination paths.
    LValue lowCell(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        DFG_ASSERT(m_graph, m_node, mode == ManualOperandSpeculation || DFG::isCell(edge.useKind()), edge.useKind());
        
        if (edge->op() == JSConstant) {
            FrozenValue* value = edge->constant();
            simulatedTypeCheck(edge, SpecCellCheck);
            if (!value->value().isCell()) {
                // The constant can never pass the cell check; give up on this block.
                if (mayHaveTypeCheck(edge.useKind()))
                    terminate(Uncountable);
                return m_out.intPtrZero;
            }
            LValue result = frozenPointer(value);
            result->setOrigin(B3::Origin(edge.node()));
            return result;
        }
        
        LoweredNodeValue value = m_jsValueValues.get(edge.node());
        if (isValid(value)) {
            LValue uncheckedValue = value.value();
            FTL_TYPE_CHECK(
                jsValueValue(uncheckedValue), edge, SpecCellCheck, isNotCell(uncheckedValue));
            return uncheckedValue;
        }
        
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.intPtrZero;
    }
17859
    // Lowers |edge| to a cell and speculates that it is an object.
    LValue lowObject(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == ObjectUse);
        
        LValue result = lowCell(edge, mode);
        speculateObject(edge, result);
        return result;
    }
17868
    // Lowers |edge| to a cell and speculates that it is a RegExp object.
    LValue lowRegExpObject(Edge edge)
    {
        LValue result = lowCell(edge);
        speculateRegExpObject(edge, result);
        return result;
    }
17875
    // Lowers |edge| to a cell and speculates that it is a Map object.
    LValue lowMapObject(Edge edge)
    {
        LValue result = lowCell(edge);
        speculateMapObject(edge, result);
        return result;
    }
17882
    // Lowers |edge| to a cell and speculates that it is a Set object.
    LValue lowSetObject(Edge edge)
    {
        LValue result = lowCell(edge);
        speculateSetObject(edge, result);
        return result;
    }
17889
    // Lowers |edge| to a cell and speculates that it is a WeakMap object.
    LValue lowWeakMapObject(Edge edge)
    {
        LValue result = lowCell(edge);
        speculateWeakMapObject(edge, result);
        return result;
    }
17896
    // Lowers |edge| to a cell and speculates that it is a WeakSet object.
    LValue lowWeakSetObject(Edge edge)
    {
        LValue result = lowCell(edge);
        speculateWeakSetObject(edge, result);
        return result;
    }
17903
    // Lowers |edge| to a cell and speculates that it is a DataView object.
    LValue lowDataViewObject(Edge edge)
    {
        LValue result = lowCell(edge);
        speculateDataViewObject(edge, result);
        return result;
    }
17910
    // Lowers |edge| to a cell and speculates that it is a Date object.
    LValue lowDateObject(Edge edge)
    {
        LValue result = lowCell(edge);
        speculateDateObject(edge, result);
        return result;
    }
17917
    // Lowers |edge| to a cell and speculates that it is a JSString.
    LValue lowString(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == StringUse || edge.useKind() == KnownStringUse || edge.useKind() == StringIdentUse)
;
        
        LValue result = lowCell(edge, mode);
        speculateString(edge, result);
        return result;
    }
17926
    // Lowers |edge| to a string, loads its StringImpl pointer, and speculates
    // that the string is identifier-like. Returns the StringImpl, not the JSString.
    LValue lowStringIdent(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == StringIdentUse);
        
        LValue string = lowString(edge, mode);
        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
        speculateStringIdent(edge, string, stringImpl);
        return stringImpl;
    }
17936
    // Lowers |edge| to a cell and speculates that it is a Symbol.
    LValue lowSymbol(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == SymbolUse);
        
        LValue result = lowCell(edge, mode);
        speculateSymbol(edge, result);
        return result;
    }
17945
    // Lowers |edge| to a cell and speculates that it is a heap-allocated BigInt.
    LValue lowHeapBigInt(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == HeapBigIntUse);
        
        LValue result = lowCell(edge, mode);
        speculateHeapBigInt(edge, result);
        return result;
    }
17954
17955#if USE(BIGINT32)
    // Lowers |edge| as an immediate BigInt32, type-checking the generic JSValue
    // lowering. Terminates the block (when a type check is possible) and returns
    // the bigInt32 zero placeholder if no JSValue lowering exists.
    LValue lowBigInt32(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BigInt32Use);
        
        LoweredNodeValue value = m_jsValueValues.get(edge.node());
        if (isValid(value)) {
            LValue result = value.value();
            FTL_TYPE_CHECK(jsValueValue(result), edge, SpecBigInt32, isNotBigInt32(result));
            return result;
        }
        
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.bigInt32Zero;
    }
17971#endif
17972
    // Lowers |edge| to a cell and speculates that it is a non-null object.
    LValue lowNonNullObject(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == ObjectUse);
        
        LValue result = lowCell(edge, mode);
        speculateNonNullObject(edge, result);
        return result;
    }
17981
    // Lowers |edge| to an unboxed boolean. Handles, in order: boolean constants,
    // an existing boolean lowering, and a generic JSValue lowering (which gets a
    // not-boolean type check, is unboxed, and is cached as the node's boolean
    // lowering). Otherwise terminates the block and returns false as placeholder.
    LValue lowBoolean(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse || edge.useKind() == KnownBooleanUse);
        
        if (edge->hasConstant()) {
            JSValue value = edge->asJSValue();
            simulatedTypeCheck(edge, SpecBoolean);
            if (!value.isBoolean()) {
                // The constant can never pass the boolean check; give up on this block.
                if (mayHaveTypeCheck(edge.useKind()))
                    terminate(Uncountable);
                return m_out.booleanFalse;
            }
            LValue result = m_out.constBool(value.asBoolean());
            result->setOrigin(B3::Origin(edge.node()));
            return result;
        }
        
        LoweredNodeValue value = m_booleanValues.get(edge.node());
        if (isValid(value)) {
            simulatedTypeCheck(edge, SpecBoolean);
            return value.value();
        }
        
        value = m_jsValueValues.get(edge.node());
        if (isValid(value)) {
            LValue unboxedResult = value.value();
            FTL_TYPE_CHECK(
                jsValueValue(unboxedResult), edge, SpecBoolean, isNotBoolean(unboxedResult));
            LValue result = unboxBoolean(unboxedResult);
            setBoolean(edge.node(), result);
            return result;
        }
        
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.booleanFalse;
    }
18019
    // Returns the node's existing double lowering; terminates the block (when a
    // type check is possible) and returns 0.0 as a placeholder if there is none.
    LValue lowDouble(Edge edge)
    {
        DFG_ASSERT(m_graph, m_node, isDouble(edge.useKind()), edge.useKind());
        
        LoweredNodeValue value = m_doubleValues.get(edge.node());
        if (isValid(value))
            return value.value();
        if (mayHaveTypeCheck(edge.useKind()))
            terminate(Uncountable);
        return m_out.doubleZero;
    }
18031
    // Lowers |edge| to a boxed (encoded) JSValue. Constants are emitted directly;
    // otherwise an existing JSValue lowering is reused, or an int32/boolean
    // lowering is boxed and cached. Crashes if the node has no usable lowering:
    // double and Int52 representations are explicitly excluded by the asserts.
    LValue lowJSValue(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
    {
        DFG_ASSERT(m_graph, m_node, mode == ManualOperandSpeculation || edge.useKind() == UntypedUse, m_node->op(), edge.useKind());
        DFG_ASSERT(m_graph, m_node, !isDouble(edge.useKind()), m_node->op(), edge.useKind());
        DFG_ASSERT(m_graph, m_node, edge.useKind() != Int52RepUse, m_node->op(), edge.useKind());
        
        if (edge->hasConstant()) {
            LValue result = m_out.constInt64(JSValue::encode(edge->asJSValue()));
            result->setOrigin(B3::Origin(edge.node()));
            return result;
        }
        
        LoweredNodeValue value = m_jsValueValues.get(edge.node());
        if (isValid(value))
            return value.value();
        
        value = m_int32Values.get(edge.node());
        if (isValid(value)) {
            LValue result = boxInt32(value.value());
            setJSValue(edge.node(), result);
            return result;
        }
        
        value = m_booleanValues.get(edge.node());
        if (isValid(value)) {
            LValue result = boxBoolean(value.value());
            setJSValue(edge.node(), result);
            return result;
        }
        
        DFG_CRASH(m_graph, m_node, makeString("Value not defined: ", String::number(edge.node()->index())).ascii().data());
        return nullptr;
    }
18065
    // Lowers |edge| as a JSValue and type-checks that it is not a cell.
    LValue lowNotCell(Edge edge)
    {
        LValue result = lowJSValue(edge, ManualOperandSpeculation);
        FTL_TYPE_CHECK(jsValueValue(result), edge, ~SpecCellCheck, isCell(result));
        return result;
    }
18072
    // Lowers |edge| as a storage (butterfly) pointer, reusing a cached lowering
    // if present; otherwise lowers it as a cell and caches that as the storage.
    LValue lowStorage(Edge edge)
    {
        LoweredNodeValue value = m_storageValues.get(edge.node());
        if (isValid(value))
            return value.value();
        
        LValue result = lowCell(edge);
        setStorage(edge.node(), result);
        return result;
    }
18083
    // Truncates a strict Int52 to int32, type-checking that the truncation is
    // lossless (sign-extending the result back must reproduce the input).
    // Caches the result as the node's int32 lowering.
    LValue strictInt52ToInt32(Edge edge, LValue value)
    {
        LValue result = m_out.castToInt32(value);
        FTL_TYPE_CHECK(
            noValue(), edge, SpecInt32Only,
            m_out.notEqual(m_out.signExt32To64(result), value));
        setInt32(edge.node(), result);
        return result;
    }
18093
    // Converts a strict Int52 to a double. Always exact: 52-bit integers fit in
    // a double's 53-bit mantissa.
    LValue strictInt52ToDouble(LValue value)
    {
        return m_out.intToDouble(value);
    }
18098
    // Boxes a strict Int52 as a JSValue: values that fit in int32 are boxed as
    // int32, the rest are boxed as doubles. Emits a diamond and merges with a phi.
    LValue strictInt52ToJSValue(LValue value)
    {
        LBasicBlock isInt32 = m_out.newBlock();
        LBasicBlock isDouble = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        
        Vector<ValueFromBlock, 2> results;
        
        // Fits in int32 iff sign-extending the truncation reproduces the value.
        LValue int32Value = m_out.castToInt32(value);
        m_out.branch(
            m_out.equal(m_out.signExt32To64(int32Value), value),
            unsure(isInt32), unsure(isDouble));
        
        LBasicBlock lastNext = m_out.appendTo(isInt32, isDouble);
        
        results.append(m_out.anchor(boxInt32(int32Value)));
        m_out.jump(continuation);
        
        m_out.appendTo(isDouble, continuation);
        
        results.append(m_out.anchor(boxDouble(m_out.intToDouble(value))));
        m_out.jump(continuation);
        
        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int64, results);
    }
18125
    // Converts strict form to shifted Int52 form (shift left by int52ShiftAmount).
    LValue strictInt52ToInt52(LValue value)
    {
        return m_out.shl(value, m_out.constInt64(JSValue::int52ShiftAmount));
    }
18130
    // Converts shifted Int52 form back to strict form (arithmetic shift right,
    // preserving the sign).
    LValue int52ToStrictInt52(LValue value)
    {
        return m_out.aShr(value, m_out.constInt64(JSValue::int52ShiftAmount));
    }
18135
    // Tests whether a boxed JSValue is an int32: boxed int32s compare unsigned
    // >= m_numberTag. |type| lets abstract-interpreter knowledge fold the test
    // to a constant via isProvenValue.
    LValue isInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecInt32Only))
            return proven;
        return m_out.aboveOrEqual(jsValue, m_numberTag);
    }
    // Inverse of isInt32: unsigned below m_numberTag means not a boxed int32.
    LValue isNotInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecInt32Only))
            return proven;
        return m_out.below(jsValue, m_numberTag);
    }
    // Extracts the int32 payload from a boxed int32 (low 32 bits).
    LValue unboxInt32(LValue jsValue)
    {
        return m_out.castToInt32(jsValue);
    }
    // Boxes an int32 as a JSValue by zero-extending and adding the number tag.
    LValue boxInt32(LValue value)
    {
        return m_out.add(m_out.zeroExt(value, Int64), m_numberTag);
    }
18156
18157#if USE(BIGINT32)
    // Tests whether a boxed JSValue carries the BigInt32 tag (mask-and-compare).
    LValue isBigInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecBigInt32))
            return proven;
        return m_out.equal(m_out.bitAnd(jsValue, m_out.constInt64(JSValue::BigInt32Mask)), m_out.constInt64(JSValue::BigInt32Tag));
    }
    // Inverse of isBigInt32.
    LValue isNotBigInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecBigInt32))
            return proven;
        return m_out.notEqual(m_out.bitAnd(jsValue, m_out.constInt64(JSValue::BigInt32Mask)), m_out.constInt64(JSValue::BigInt32Tag));
    }
    // Extracts the int32 payload of a BigInt32: shift out the 16-bit tag, truncate.
    LValue unboxBigInt32(LValue jsValue)
    {
        return m_out.castToInt32(m_out.lShr(jsValue, m_out.constInt64(16)));
    }
    // Boxes an int32 payload as a BigInt32: payload in the upper bits (shifted
    // left by 16) with the BigInt32 tag OR'ed into the low bits.
    LValue boxBigInt32(LValue int32Value)
    {
        return m_out.bitOr(
            m_out.shl(m_out.zeroExt(int32Value, B3::Int64), m_out.constInt64(16)),
            m_out.constInt64(JSValue::BigInt32Tag));
    }
    // Tests that a JSValue is neither a BigInt32 immediate nor a heap BigInt.
    LValue isNotAnyBigInt(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecBigInt))
            return proven;

        // if (isBigInt32)
        //     return false
        // if (!isCell)
        //     return true;
        // return !isHeapBigInt
        LBasicBlock isBigInt32Case = m_out.newBlock();
        LBasicBlock isNotBigInt32Case = m_out.newBlock();
        LBasicBlock isNotCellCase = m_out.newBlock();
        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isBigInt32(jsValue, type), unsure(isBigInt32Case), unsure(isNotBigInt32Case));

        LBasicBlock lastNext = m_out.appendTo(isBigInt32Case, isNotBigInt32Case);
        ValueFromBlock returnFalse = m_out.anchor(m_out.booleanFalse);
        m_out.jump(continuation);

        m_out.appendTo(isNotBigInt32Case, isNotCellCase);
        // FIXME: we should filter the type passed to isCell to account for the previous test that told us we are definitely not a BigInt32.
        m_out.branch(isCell(jsValue, type), unsure(isCellCase), unsure(isNotCellCase));

        m_out.appendTo(isNotCellCase, isCellCase);
        ValueFromBlock returnTrue = m_out.anchor(m_out.booleanTrue);
        m_out.jump(continuation);

        m_out.appendTo(isCellCase, continuation);
        ValueFromBlock returnIsNotHeapBigInt = m_out.anchor(isNotHeapBigInt(jsValue));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, returnFalse, returnTrue, returnIsNotHeapBigInt);
    }
18217#endif // USE(BIGINT32)
18218
    // Tests whether a JSValue is a cell, a misc immediate, or (when BigInt32 is
    // enabled) a BigInt32 — i.e. anything that is not a boxed number. All such
    // values have no bits in common with m_numberTag.
    LValue isCellOrMiscOrBigInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        SpeculatedType filter = SpecCellCheck | SpecMisc;
#if USE(BIGINT32)
        filter |= SpecBigInt32;
#endif // USE(BIGINT32)
        if (LValue proven = isProvenValue(type, filter))
            return proven;
        return m_out.testIsZero64(jsValue, m_numberTag);
    }
    // Inverse of isCellOrMiscOrBigInt32: any bit of m_numberTag set means the
    // value is a boxed number.
    LValue isNotCellOrMiscOrBigInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        SpeculatedType filter = SpecCellCheck | SpecMisc;
#if USE(BIGINT32)
        filter |= SpecBigInt32;
#endif // USE(BIGINT32)
        if (LValue proven = isProvenValue(type, ~filter))
            return proven;
        return m_out.testNonZero64(jsValue, m_numberTag);
    }
18239
    // Unboxes a JSValue-encoded double: add the number tag back, then bit-cast
    // to Double. Optionally reports the intermediate int64 via |unboxedAsInt|.
    LValue unboxDouble(LValue jsValue, LValue* unboxedAsInt = nullptr)
    {
        LValue asInt = m_out.add(jsValue, m_numberTag);
        if (unboxedAsInt)
            *unboxedAsInt = asInt;
        return m_out.bitCast(asInt, Double);
    }
    // Boxes a double as a JSValue: bit-cast to int64, subtract the number tag.
    LValue boxDouble(LValue doubleValue)
    {
        return m_out.sub(m_out.bitCast(doubleValue, Int64), m_numberTag);
    }
18251
    // Converts a boxed JSValue to a strict Int52. Int32s are sign-extended
    // directly; anything else goes through the boxed-double slow path, which
    // type-checks that the conversion produced a representable Int52
    // (operationConvertBoxedDoubleToInt52 signals failure with notInt52).
    LValue jsValueToStrictInt52(Edge edge, LValue boxedValue)
    {
        LBasicBlock intCase = m_out.newBlock();
        LBasicBlock doubleCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        
        // Fold the int32 test to a constant when the abstract interpreter
        // already proved the value's type either way.
        LValue isNotInt32;
        if (!m_interpreter.needsTypeCheck(edge, SpecInt32Only))
            isNotInt32 = m_out.booleanFalse;
        else if (!m_interpreter.needsTypeCheck(edge, ~SpecInt32Only))
            isNotInt32 = m_out.booleanTrue;
        else
            isNotInt32 = this->isNotInt32(boxedValue);
        m_out.branch(isNotInt32, unsure(doubleCase), unsure(intCase));
        
        LBasicBlock lastNext = m_out.appendTo(intCase, doubleCase);
        
        ValueFromBlock intToInt52 = m_out.anchor(
            m_out.signExt32To64(unboxInt32(boxedValue)));
        m_out.jump(continuation);
        
        m_out.appendTo(doubleCase, continuation);
        
        LValue possibleResult = m_out.callWithoutSideEffects(Int64, operationConvertBoxedDoubleToInt52, boxedValue);
        FTL_TYPE_CHECK(
            jsValueValue(boxedValue), edge, SpecInt32Only | SpecAnyIntAsDouble,
            m_out.equal(possibleResult, m_out.constInt64(JSValue::notInt52)));
        
        ValueFromBlock doubleToInt52 = m_out.anchor(possibleResult);
        m_out.jump(continuation);
        
        m_out.appendTo(continuation, lastNext);
        
        return m_out.phi(Int64, intToInt52, doubleToInt52);
    }
18287
    // Converts a double to a strict Int52, speculating Int52Overflow when the
    // double is not exactly representable: non-integral values, negative zero
    // (sign bit set on a zero result), and values outside the signed 52-bit
    // range all trigger an exit. Filters the edge to SpecAnyIntAsDouble.
    LValue doubleToStrictInt52(Edge edge, LValue value)
    {
        LValue integerValue = m_out.doubleToInt64(value);
        LValue integerValueConvertedToDouble = m_out.intToDouble(integerValue);
        LValue valueNotConvertibleToInteger = m_out.doubleNotEqualOrUnordered(value, integerValueConvertedToDouble);
        speculate(Int52Overflow, doubleValue(value), edge.node(), valueNotConvertibleToInteger);
        
        LBasicBlock valueIsZero = m_out.newBlock();
        LBasicBlock valueIsNotZero = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        m_out.branch(m_out.isZero64(integerValue), unsure(valueIsZero), unsure(valueIsNotZero));
        
        // Zero result: reject -0.0 by checking the original double's sign bit.
        LBasicBlock lastNext = m_out.appendTo(valueIsZero, valueIsNotZero);
        LValue doubleBitcastToInt64 = m_out.bitCast(value, Int64);
        LValue signBitSet = m_out.lessThan(doubleBitcastToInt64, m_out.constInt64(0));
        speculate(Int52Overflow, doubleValue(value), edge.node(), signBitSet);
        m_out.jump(continuation);
        
        // Non-zero result: range-check against +/- 2^(numberOfInt52Bits - 1).
        m_out.appendTo(valueIsNotZero, continuation);
        speculate(Int52Overflow, doubleValue(value), edge.node(), m_out.greaterThanOrEqual(integerValue, m_out.constInt64(static_cast<int64_t>(1) << (JSValue::numberOfInt52Bits - 1))));
        speculate(Int52Overflow, doubleValue(value), edge.node(), m_out.lessThan(integerValue, m_out.constInt64(-(static_cast<int64_t>(1) << (JSValue::numberOfInt52Bits - 1)))));
        m_out.jump(continuation);
        
        m_out.appendTo(continuation, lastNext);
        m_interpreter.filter(edge, SpecAnyIntAsDouble);
        return integerValue;
    }
18315
    // Converts a double to int32, speculating Overflow when the double is not an
    // exactly representable int32, and optionally NegativeZero when the result is
    // zero but the original double had its sign bit set (-0.0).
    LValue convertDoubleToInt32(LValue value, bool shouldCheckNegativeZero)
    {
        LValue integerValue = m_out.doubleToInt(value);
        LValue integerValueConvertedToDouble = m_out.intToDouble(integerValue);
        LValue valueNotConvertibleToInteger = m_out.doubleNotEqualOrUnordered(value, integerValueConvertedToDouble);
        speculate(Overflow, FormattedValue(DataFormatDouble, value), m_node, valueNotConvertibleToInteger);
        
        if (shouldCheckNegativeZero) {
            LBasicBlock valueIsZero = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            m_out.branch(m_out.isZero32(integerValue), unsure(valueIsZero), unsure(continuation));
            
            LBasicBlock lastNext = m_out.appendTo(valueIsZero, continuation);
            
            // -0.0 converts to integer 0; detect it via the double's sign bit.
            LValue doubleBitcastToInt64 = m_out.bitCast(value, Int64);
            LValue signBitSet = m_out.lessThan(doubleBitcastToInt64, m_out.constInt64(0));
            
            speculate(NegativeZero, FormattedValue(DataFormatDouble, value), m_node, signBitSet);
            m_out.jump(continuation);
            m_out.appendTo(continuation, lastNext);
        }
        return integerValue;
    }
18339
    // Tests whether a JSValue is a number, i.e. not a cell/misc/BigInt32.
    LValue isNumber(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecFullNumber))
            return proven;
        return isNotCellOrMiscOrBigInt32(jsValue);
    }
    // Inverse of isNumber.
    LValue isNotNumber(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecFullNumber))
            return proven;
        return isCellOrMiscOrBigInt32(jsValue);
    }
18352
    // Tests whether a JSValue is not a cell: any bit of m_notCellMask set.
    LValue isNotCell(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecCellCheck))
            return proven;
        return m_out.testNonZero64(jsValue, m_notCellMask);
    }
18359
    // Tests whether a JSValue is a cell: no bits of m_notCellMask set.
    LValue isCell(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecCellCheck))
            return proven;
        return m_out.testIsZero64(jsValue, m_notCellMask);
    }
18366
    // Tests whether a JSValue is not a misc immediate: misc values are encoded
    // at or below JSValue::MiscTag (unsigned compare).
    LValue isNotMisc(LValue value, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecMisc))
            return proven;
        return m_out.above(value, m_out.constInt64(JSValue::MiscTag));
    }
18373
    // Logical negation of isNotMisc.
    LValue isMisc(LValue value, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecMisc))
            return proven;
        return m_out.logicalNot(isNotMisc(value));
    }
18380
    // Tests whether a JSValue is not a boolean: XOR with ValueFalse makes the
    // two booleans 0 and 1, so any bit outside the lowest means non-boolean.
    LValue isNotBoolean(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecBoolean))
            return proven;
        return m_out.testNonZero64(
            m_out.bitXor(jsValue, m_out.constInt64(JSValue::ValueFalse)),
            m_out.constInt64(~1));
    }
    // Logical negation of isNotBoolean.
    LValue isBoolean(LValue jsValue, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecBoolean))
            return proven;
        return m_out.logicalNot(isNotBoolean(jsValue));
    }
    // Extracts the low bit of a boxed boolean as a boolean LValue.
    LValue unboxBoolean(LValue jsValue)
    {
        // We want to use a cast that guarantees that B3 knows that even the integer
        // value is just 0 or 1. But for now we do it the direct way.
        return m_out.notZero64(m_out.bitAnd(jsValue, m_out.constInt64(1)));
    }
    // Boxes a boolean as a JSValue by selecting between ValueTrue and ValueFalse.
    LValue boxBoolean(LValue value)
    {
        return m_out.select(
            value, m_out.constInt64(JSValue::ValueTrue), m_out.constInt64(JSValue::ValueFalse));
    }
18406
    // Tests whether a JSValue is neither null nor undefined: masking off the
    // UndefinedTag bit maps both to ValueNull, so anything else is "not other".
    LValue isNotOther(LValue value, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecOther))
            return proven;
        return m_out.notEqual(
            m_out.bitAnd(value, m_out.constInt64(~JSValue::UndefinedTag)),
            m_out.constInt64(JSValue::ValueNull));
    }
    // Tests whether a JSValue is null or undefined (inverse of isNotOther).
    LValue isOther(LValue value, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, SpecOther))
            return proven;
        return m_out.equal(
            m_out.bitAnd(value, m_out.constInt64(~JSValue::UndefinedTag)),
            m_out.constInt64(JSValue::ValueNull));
    }
18423
18424 LValue isProvenValue(SpeculatedType provenType, SpeculatedType wantedType)
18425 {
18426 if (!(provenType & ~wantedType))
18427 return m_out.booleanTrue;
18428 if (!(provenType & wantedType))
18429 return m_out.booleanFalse;
18430 return nullptr;
18431 }
18432
    // Emits the type check implied by |edge|'s use kind by dispatching to the
    // matching speculate* helper. "Known" use kinds assert the check was already
    // proven; UntypedUse checks nothing; an unknown use kind is a lowering bug.
    void speculate(Edge edge)
    {
        switch (edge.useKind()) {
        case UntypedUse:
            break;
        case KnownInt32Use:
        case KnownStringUse:
        case KnownPrimitiveUse:
        case KnownOtherUse:
        case DoubleRepUse:
        case Int52RepUse:
        case KnownCellUse:
        case KnownBooleanUse:
            ASSERT(!m_interpreter.needsTypeCheck(edge));
            break;
        case Int32Use:
            speculateInt32(edge);
            break;
        case CellUse:
            speculateCell(edge);
            break;
        case CellOrOtherUse:
            speculateCellOrOther(edge);
            break;
        case AnyIntUse:
            speculateAnyInt(edge);
            break;
        case ObjectUse:
            speculateObject(edge);
            break;
        case ArrayUse:
            speculateArray(edge);
            break;
        case FunctionUse:
            speculateFunction(edge);
            break;
        case ObjectOrOtherUse:
            speculateObjectOrOther(edge);
            break;
        case FinalObjectUse:
            speculateFinalObject(edge);
            break;
        case RegExpObjectUse:
            speculateRegExpObject(edge);
            break;
        case PromiseObjectUse:
            speculatePromiseObject(edge);
            break;
        case ProxyObjectUse:
            speculateProxyObject(edge);
            break;
        case DerivedArrayUse:
            speculateDerivedArray(edge);
            break;
        case DateObjectUse:
            speculateDateObject(edge);
            break;
        case MapObjectUse:
            speculateMapObject(edge);
            break;
        case SetObjectUse:
            speculateSetObject(edge);
            break;
        case WeakMapObjectUse:
            speculateWeakMapObject(edge);
            break;
        case WeakSetObjectUse:
            speculateWeakSetObject(edge);
            break;
        case DataViewObjectUse:
            speculateDataViewObject(edge);
            break;
        case StringUse:
            speculateString(edge);
            break;
        case StringOrOtherUse:
            speculateStringOrOther(edge);
            break;
        case StringIdentUse:
            speculateStringIdent(edge);
            break;
        case SymbolUse:
            speculateSymbol(edge);
            break;
        case StringObjectUse:
            speculateStringObject(edge);
            break;
        case StringOrStringObjectUse:
            speculateStringOrStringObject(edge);
            break;
        case NumberUse:
            speculateNumber(edge);
            break;
        case RealNumberUse:
            speculateRealNumber(edge);
            break;
        case DoubleRepRealUse:
            speculateDoubleRepReal(edge);
            break;
        case DoubleRepAnyIntUse:
            speculateDoubleRepAnyInt(edge);
            break;
        case BooleanUse:
            speculateBoolean(edge);
            break;
#if USE(BIGINT32)
        case BigInt32Use:
            speculateBigInt32(edge);
            break;
        case AnyBigIntUse:
            speculateAnyBigInt(edge);
            break;
#endif // USE(BIGINT32)
        case HeapBigIntUse:
            speculateHeapBigInt(edge);
            break;
        case NotStringVarUse:
            speculateNotStringVar(edge);
            break;
        case NotSymbolUse:
            speculateNotSymbol(edge);
            break;
        case NotCellUse:
            speculateNotCell(edge);
            break;
        case NotCellNorBigIntUse:
            speculateNotCellNorBigInt(edge);
            break;
        case NotDoubleUse:
            speculateNotDouble(edge);
            break;
        case NeitherDoubleNorHeapBigIntNorStringUse:
            speculateNeitherDoubleNorHeapBigIntNorString(edge);
            break;
        case OtherUse:
            speculateOther(edge);
            break;
        case MiscUse:
            speculateMisc(edge);
            break;
        default:
            DFG_CRASH(m_graph, m_node, "Unsupported speculation use kind");
        }
    }
18577
    // Overload matching DFG edge-visitor signatures; the node is unused.
    void speculate(Node*, Edge edge)
    {
        speculate(edge);
    }
18582
    // Speculates by lowering as int32 for the check's side effect; result unused.
    void speculateInt32(Edge edge)
    {
        lowInt32(edge);
    }
18587
    // Speculates by lowering as a cell for the check's side effect; result unused.
    void speculateCell(Edge edge)
    {
        lowCell(edge);
    }
18592
    // Speculates that the value is not a cell; skipped when already proven.
    void speculateNotCell(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;
        lowNotCell(edge);
    }
18599
    // Speculates that the value is neither a cell nor a BigInt. With BigInt32
    // enabled this adds a BigInt32 check on top of the not-cell check; without
    // it, not-cell alone rules out all BigInts (they would be heap cells).
    void speculateNotCellNorBigInt(Edge edge)
    {
#if USE(BIGINT32)
        if (!m_interpreter.needsTypeCheck(edge))
            return;
        LValue nonCell = lowNotCell(edge);
        FTL_TYPE_CHECK(jsValueValue(nonCell), edge, ~SpecCellCheck & ~SpecBigInt, isBigInt32(nonCell));
#else
        speculateNotCell(edge);
#endif
    }
18611
    // Speculates that the value is not a double: int32s pass immediately;
    // everything else must fail the isNumber test.
    void speculateNotDouble(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;
        
        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        
        LBasicBlock isNotInt32 = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        
        m_out.branch(isInt32(value, provenType(edge)), unsure(continuation), unsure(isNotInt32));
        
        LBasicBlock lastNext = m_out.appendTo(isNotInt32, continuation);
        FTL_TYPE_CHECK(jsValueValue(value), edge, ~SpecFullDouble, isNumber(value));
        m_out.jump(continuation);
        
        m_out.appendTo(continuation, lastNext);
    }
18630
    // Speculates that the value is not a double, a JSString, or a heap BigInt:
    // int32s pass immediately; non-int32 non-cells must fail isNumber; cells
    // must additionally fail isString and isHeapBigInt.
    void speculateNeitherDoubleNorHeapBigIntNorString(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;
        
        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        
        LBasicBlock isNotInt32 = m_out.newBlock();
        LBasicBlock isCellBlock = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        
        m_out.branch(isInt32(value, provenType(edge)), unsure(continuation), unsure(isNotInt32));
        
        LBasicBlock lastNext = m_out.appendTo(isNotInt32, isCellBlock);
        FTL_TYPE_CHECK(jsValueValue(value), edge, ~SpecFullDouble, isNumber(value));
        m_out.branch(isCell(value, provenType(edge)), unsure(isCellBlock), unsure(continuation));
        
        m_out.appendTo(isCellBlock, continuation);
        FTL_TYPE_CHECK(jsValueValue(value), edge, ~SpecString, isString(value));
        FTL_TYPE_CHECK(jsValueValue(value), edge, ~SpecHeapBigInt, isHeapBigInt(value));
        m_out.jump(continuation);
        
        m_out.appendTo(continuation, lastNext);
    }
18655
    // Speculates that the value is a cell, null, or undefined: cells pass
    // immediately; non-cells must fail the isNotOther test.
    void speculateCellOrOther(Edge edge)
    {
        if (shouldNotHaveTypeCheck(edge.useKind()))
            return;
        
        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        
        LBasicBlock isNotCell = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        
        m_out.branch(isCell(value, provenType(edge)), unsure(continuation), unsure(isNotCell));
        
        LBasicBlock lastNext = m_out.appendTo(isNotCell, continuation);
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));
        m_out.jump(continuation);
        
        m_out.appendTo(continuation, lastNext);
    }
18674
    // Speculates that the value converts to an Int52; conversion result unused.
    void speculateAnyInt(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;
        
        jsValueToStrictInt52(edge, lowJSValue(edge, ManualOperandSpeculation));
    }
18682
    // Tests whether |cell|'s JSType lies in [first, last]. A single-type range
    // is a direct compare; otherwise the subtract-then-unsigned-compare trick
    // folds the two range bounds into one comparison. When
    // |speculatedTypeForQuery| is given, proven type info can fold the test.
    LValue isCellWithType(LValue cell, JSTypeRange queriedTypeRange, std::optional<SpeculatedType> speculatedTypeForQuery, SpeculatedType type = SpecFullTop)
    {
        if (speculatedTypeForQuery) {
            if (LValue proven = isProvenValue(type & SpecCell, speculatedTypeForQuery.value()))
                return proven;
        }
        if (queriedTypeRange.first == queriedTypeRange.last) {
            return m_out.equal(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
                m_out.constInt32(queriedTypeRange.first));
        }
        
        ASSERT(queriedTypeRange.last > queriedTypeRange.first);
        LValue first = m_out.sub(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
            m_out.constInt32(queriedTypeRange.first));
        return m_out.belowOrEqual(first, m_out.constInt32(queriedTypeRange.last - queriedTypeRange.first));
    }
18701
    // Single-type convenience overload: tests for exactly |queriedType|.
    LValue isCellWithType(LValue cell, JSType queriedType, std::optional<SpeculatedType> speculatedTypeForQuery, SpeculatedType type = SpecFullTop)
    {
        return isCellWithType(cell, JSTypeRange { queriedType, queriedType }, speculatedTypeForQuery, type);
    }
18706
    // Tests whether |cell| is a typed array view (excluding DataView) via the
    // contiguous JSType range of typed array types.
    LValue isTypedArrayView(LValue cell, SpeculatedType type = SpecFullTop)
    {
        return isCellWithType(cell, JSTypeRange { static_cast<JSType>(FirstTypedArrayType), static_cast<JSType>(LastTypedArrayTypeExcludingDataView) }, SpecTypedArrayView, type);
    }
18711
    // Tests whether |cell| is an object: its JSType is >= ObjectType.
    LValue isObject(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecObject))
            return proven;
        return m_out.aboveOrEqual(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
            m_out.constInt32(ObjectType));
    }
18720
    // Inverse of isObject: JSType below ObjectType.
    LValue isNotObject(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, ~SpecObject))
            return proven;
        return m_out.below(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
            m_out.constInt32(ObjectType));
    }
18729
    // Tests whether |cell| is not a JSString by comparing its structure ID
    // against the VM's shared string structure.
    LValue isNotString(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, ~SpecString))
            return proven;
        return m_out.notEqual(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().stringStructure->id()));
    }
18738
    // Tests whether |cell| is a JSString via the VM's shared string structure ID.
    LValue isString(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecString))
            return proven;
        return m_out.equal(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().stringStructure->id()));
    }
18747
    // Tests whether |string| is a rope (unresolved concatenation). When an edge
    // is supplied, proven-type and constant knowledge can fold the test to
    // false; otherwise checks the isRopeInPointer bit in JSString's value field.
    LValue isRopeString(LValue string, Edge edge = Edge())
    {
        if (edge) {
            // Identifier-backed strings are never ropes.
            if (!((provenType(edge) & SpecString) & ~SpecStringIdent))
                return m_out.booleanFalse;
            if (JSValue value = provenValue(edge)) {
                if (value.isCell() && value.asCell()->type() == StringType && !asString(value)->isRope())
                    return m_out.booleanFalse;
            }
            String value = edge->tryGetString(m_graph);
            if (!value.isNull()) {
                // If this value is LazyValue, it will be converted to JSString, and the result must be non-rope string.
                return m_out.booleanFalse;
            }
        }
        
        return m_out.testNonZeroPtr(m_out.loadPtr(string, m_heaps.JSString_value), m_out.constIntPtr(JSString::isRopeInPointer));
    }
18766
    // Inverse of isRopeString, with the same static fold opportunities (folding
    // to true where isRopeString folds to false).
    LValue isNotRopeString(LValue string, Edge edge = Edge())
    {
        if (edge) {
            // Identifier-backed strings are never ropes.
            if (!((provenType(edge) & SpecString) & ~SpecStringIdent))
                return m_out.booleanTrue;
            if (JSValue value = provenValue(edge)) {
                if (value.isCell() && value.asCell()->type() == StringType && !asString(value)->isRope())
                    return m_out.booleanTrue;
            }
            String value = edge->tryGetString(m_graph);
            if (!value.isNull()) {
                // If this value is LazyValue, it will be converted to JSString, and the result must be non-rope string.
                return m_out.booleanTrue;
            }
        }
        
        return m_out.testIsZeroPtr(m_out.loadPtr(string, m_heaps.JSString_value), m_out.constIntPtr(JSString::isRopeInPointer));
    }
18785
    // Emits a boolean: true when the cell is not a Symbol (structure ID compare
    // against the VM's symbol structure). Short-circuits on a proven type.
    LValue isNotSymbol(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, ~SpecSymbol))
            return proven;
        return m_out.notEqual(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().symbolStructure->id()));
    }
18794
    // Emits a boolean: true when the cell is a Symbol. Short-circuits on a
    // proven type.
    LValue isSymbol(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecSymbol))
            return proven;
        return m_out.equal(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().symbolStructure->id()));
    }
18803
    // Like isNotHeapBigInt(), but `value` may be a non-cell JSValue. Non-cells
    // are trivially "not a heap BigInt" (the defaultToTrue anchor); only the
    // cell path loads the structure ID.
    LValue isNotHeapBigIntUnknownWhetherCell(LValue value, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type, ~SpecHeapBigInt))
            return proven;

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        ValueFromBlock defaultToTrue = m_out.anchor(m_out.booleanTrue);
        m_out.branch(isCell(value, type), unsure(isCellCase), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
        ValueFromBlock returnForCell = m_out.anchor(isNotHeapBigInt(value, type));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(Int32, defaultToTrue, returnForCell);
        return result;
    }
18823
    // Emits a boolean: true when the cell is not a heap-allocated BigInt
    // (structure ID compare against the VM's bigInt structure). `cell` must
    // already be known to be a cell.
    LValue isNotHeapBigInt(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, ~SpecHeapBigInt))
            return proven;
        return m_out.notEqual(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().bigIntStructure->id()));
    }
18832
    // Emits a boolean: true when the cell is a heap-allocated BigInt.
    // Short-circuits on a proven type.
    LValue isHeapBigInt(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecHeapBigInt))
            return proven;
        return m_out.equal(
            m_out.load32(cell, m_heaps.JSCell_structureID),
            m_out.constInt32(vm().bigIntStructure->id()));
    }
18841
    // Emits a boolean: true when the cell's indexing type already matches what
    // Arrayify would produce for `arrayMode`, i.e. no conversion is needed.
    // Original-structure array classes are impossible here (Arrayify never
    // targets them) and crash.
    LValue isArrayTypeForArrayify(LValue cell, ArrayMode arrayMode)
    {
        switch (arrayMode.type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::Undecided:
        case Array::ArrayStorage: {
            // For writes we also require the CopyOnWrite bit to be clear, so
            // COW arrays fail the check and get converted.
            IndexingType indexingModeMask = IsArray | IndexingShapeMask;
            if (arrayMode.action() == Array::Write)
                indexingModeMask |= CopyOnWrite;

            IndexingType shape = arrayMode.shapeMask();
            LValue indexingType = m_out.load8ZeroExt32(cell, m_heaps.JSCell_indexingTypeAndMisc);

            switch (arrayMode.arrayClass()) {
            case Array::OriginalArray:
            case Array::OriginalCopyOnWriteArray:
                DFG_CRASH(m_graph, m_node, "Unexpected original array");
                return nullptr;

            case Array::Array:
                // Must be an array (IsArray set) with the expected shape.
                return m_out.equal(
                    m_out.bitAnd(indexingType, m_out.constInt32(indexingModeMask)),
                    m_out.constInt32(IsArray | shape));

            case Array::NonArray:
            case Array::OriginalNonArray:
                // Must not be an array (IsArray clear) with the expected shape.
                return m_out.equal(
                    m_out.bitAnd(indexingType, m_out.constInt32(indexingModeMask)),
                    m_out.constInt32(shape));

            case Array::PossiblyArray:
                // IsArray is irrelevant: mask it out before comparing shapes.
                return m_out.equal(
                    m_out.bitAnd(indexingType, m_out.constInt32(indexingModeMask & ~IsArray)),
                    m_out.constInt32(shape));
            }
            break;
        }

        case Array::SlowPutArrayStorage: {
            ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
            LValue indexingType = m_out.load8ZeroExt32(cell, m_heaps.JSCell_indexingTypeAndMisc);

            LBasicBlock trueCase = m_out.newBlock();
            LBasicBlock checkCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Unsigned range trick: shape in [ArrayStorageShape,
            // SlowPutArrayStorageShape] iff (shape - ArrayStorageShape) <=
            // (SlowPutArrayStorageShape - ArrayStorageShape).
            ValueFromBlock falseValue = m_out.anchor(m_out.booleanFalse);
            LValue isAnArrayStorageShape = m_out.belowOrEqual(
                m_out.sub(
                    m_out.bitAnd(indexingType, m_out.constInt32(IndexingShapeMask)),
                    m_out.constInt32(ArrayStorageShape)),
                m_out.constInt32(SlowPutArrayStorageShape - ArrayStorageShape));
            m_out.branch(isAnArrayStorageShape, unsure(checkCase), unsure(continuation));

            // Shape matched; now check the IsArray bit per array class.
            LBasicBlock lastNext = m_out.appendTo(checkCase, trueCase);
            switch (arrayMode.arrayClass()) {
            case Array::OriginalArray:
            case Array::OriginalCopyOnWriteArray:
                DFG_CRASH(m_graph, m_node, "Unexpected original array");
                return nullptr;

            case Array::Array:
                m_out.branch(
                    m_out.testNonZero32(indexingType, m_out.constInt32(IsArray)),
                    unsure(trueCase), unsure(continuation));
                break;

            case Array::NonArray:
            case Array::OriginalNonArray:
                m_out.branch(
                    m_out.testIsZero32(indexingType, m_out.constInt32(IsArray)),
                    unsure(trueCase), unsure(continuation));
                break;

            case Array::PossiblyArray:
                m_out.jump(trueCase);
                break;
            }

            m_out.appendTo(trueCase, continuation);
            ValueFromBlock trueValue = m_out.anchor(m_out.booleanTrue);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int32, falseValue, trueValue);
        }

        default:
            break;
        }
        DFG_CRASH(m_graph, m_node, "Corrupt array class");
    }
18936
    // Emits a boolean: true when the cell satisfies CheckArray for `arrayMode`.
    // Indexed-storage modes defer to isArrayTypeForArrayify(); the argument
    // modes and typed-array modes check the JSType byte directly.
    LValue isArrayTypeForCheckArray(LValue cell, ArrayMode arrayMode)
    {
        switch (arrayMode.type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::Undecided:
        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage:
            return isArrayTypeForArrayify(cell, arrayMode);

        case Array::DirectArguments:
            return m_out.equal(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
                m_out.constInt32(DirectArgumentsType));

        case Array::ScopedArguments:
            return m_out.equal(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
                m_out.constInt32(ScopedArgumentsType));

        default: {
            // Only typed-array views remain; AnyTypedArray accepts every view
            // type, otherwise match the one specific JSType.
            DFG_ASSERT(m_graph, m_node, arrayMode.isSomeTypedArrayView());
            if (arrayMode.type() == Array::AnyTypedArray)
                return isTypedArrayView(cell);
            return m_out.equal(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
                m_out.constInt32(typeForTypedArrayType(arrayMode.typedArrayType())));
        }
        }
    }
18968
    // Emits a boolean: true when the cell's JSType is JSFunctionType.
    // Short-circuits on a proven type.
    LValue isFunction(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, SpecFunction))
            return proven;
        return isType(cell, JSFunctionType);
    }
    // Complement of isFunction(). Short-circuits on a proven type.
    LValue isNotFunction(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (LValue proven = isProvenValue(type & SpecCell, ~SpecFunction))
            return proven;
        return isNotType(cell, JSFunctionType);
    }
18981
    // Emits a boolean: true when the object needs special typeof handling,
    // i.e. its type-info flags include MasqueradesAsUndefined or
    // OverridesGetCallData. Trivially false if the type can't be SpecObjectOther.
    LValue isExoticForTypeof(LValue cell, SpeculatedType type = SpecFullTop)
    {
        if (!(type & SpecObjectOther))
            return m_out.booleanFalse;
        return m_out.testNonZero32(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
            m_out.constInt32(MasqueradesAsUndefined | OverridesGetCallData));
    }
18990
    // Emits a boolean: true when the cell's JSType byte equals `type`.
    LValue isType(LValue cell, JSType type)
    {
        return m_out.equal(
            m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
            m_out.constInt32(type));
    }
18997
    // Logical negation of isType().
    LValue isNotType(LValue cell, JSType type)
    {
        return m_out.logicalNot(isType(cell, type));
    }
19002
    // Type check: the already-lowered cell must be an object; filters the
    // edge's proven type to SpecObject.
    void speculateObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecObject, isNotObject(cell));
    }
19007
    // Overload that lowers `edge` to a cell via lowCell() first.
    void speculateObject(Edge edge)
    {
        speculateObject(edge, lowCell(edge));
    }
19012
    // Type check: cell's JSType must be ArrayType; filters to SpecArray.
    void speculateArray(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecArray, isNotType(cell, ArrayType));
    }
19018
    // Overload that lowers `edge` to a cell via lowCell() first.
    void speculateArray(Edge edge)
    {
        speculateArray(edge, lowCell(edge));
    }
19023
    // Type check: cell must be a JSFunction; filters to SpecFunction.
    void speculateFunction(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecFunction, isNotFunction(cell));
    }
19028
    // Overload that lowers `edge` to a cell via lowCell() first.
    void speculateFunction(Edge edge)
    {
        speculateFunction(edge, lowCell(edge));
    }
19033
    // Type check: the value must be an object or "other" (null/undefined).
    // Splits on cell-ness and runs the appropriate check on each path; each
    // path's filter keeps the types the other path is responsible for.
    void speculateObjectOrOther(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock primitiveCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isNotCell(value, provenType(edge)), unsure(primitiveCase), unsure(cellCase));

        LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase);

        // Cell path: must be an object.
        FTL_TYPE_CHECK(
            jsValueValue(value), edge, (~SpecCellCheck) | SpecObject, isNotObject(value));

        m_out.jump(continuation);

        m_out.appendTo(primitiveCase, continuation);

        // Non-cell path: must be null/undefined ("other").
        FTL_TYPE_CHECK(
            jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
19063
    // Type check: cell's JSType must be FinalObjectType; filters to SpecFinalObject.
    void speculateFinalObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecFinalObject, isNotType(cell, FinalObjectType));
    }
19069
    // Overload that lowers `edge` to a cell via lowCell() first.
    void speculateFinalObject(Edge edge)
    {
        speculateFinalObject(edge, lowCell(edge));
    }
19074
    // Type check: cell's JSType must be RegExpObjectType; filters to SpecRegExpObject.
    void speculateRegExpObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecRegExpObject, isNotType(cell, RegExpObjectType));
    }
19080
    // Overload that lowers `edge` to a cell via lowCell() first.
    void speculateRegExpObject(Edge edge)
    {
        speculateRegExpObject(edge, lowCell(edge));
    }
19085
    // Type check: cell's JSType must be ProxyObjectType; filters to SpecProxyObject.
    void speculateProxyObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecProxyObject, isNotType(cell, ProxyObjectType));
    }
19091
    // Overload that lowers `edge` to a cell via lowCell() first.
    void speculateProxyObject(Edge edge)
    {
        speculateProxyObject(edge, lowCell(edge));
    }
19096
    // Type check: cell's JSType must be DerivedArrayType; filters to SpecDerivedArray.
    void speculateDerivedArray(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecDerivedArray, isNotType(cell, DerivedArrayType));
    }
19102
    // Overload that lowers `edge` to a cell via lowCell() first.
    void speculateDerivedArray(Edge edge)
    {
        speculateDerivedArray(edge, lowCell(edge));
    }
19107
    // Type check: cell's JSType must be JSPromiseType; filters to SpecPromiseObject.
    void speculatePromiseObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecPromiseObject, isNotType(cell, JSPromiseType));
    }
19113
    // Overload that lowers `edge` to a cell via lowCell() first.
    void speculatePromiseObject(Edge edge)
    {
        speculatePromiseObject(edge, lowCell(edge));
    }
19118
    // Type check: cell's JSType must be JSDateType; filters to SpecDateObject.
    void speculateDateObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecDateObject, isNotType(cell, JSDateType));
    }
19124
    // Overload that lowers `edge` to a cell via lowCell() first.
    void speculateDateObject(Edge edge)
    {
        speculateDateObject(edge, lowCell(edge));
    }
19129
    // Type check: cell's JSType must be JSMapType; filters to SpecMapObject.
    void speculateMapObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecMapObject, isNotType(cell, JSMapType));
    }
19135
    // Overload that lowers `edge` to a cell via lowCell() first.
    void speculateMapObject(Edge edge)
    {
        speculateMapObject(edge, lowCell(edge));
    }
19140
    // Type check: cell's JSType must be JSSetType; filters to SpecSetObject.
    void speculateSetObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecSetObject, isNotType(cell, JSSetType));
    }
19146
    // Overload that lowers `edge` to a cell via lowCell() first.
    void speculateSetObject(Edge edge)
    {
        speculateSetObject(edge, lowCell(edge));
    }
19151
    // Type check: cell's JSType must be JSWeakMapType; filters to SpecWeakMapObject.
    void speculateWeakMapObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecWeakMapObject, isNotType(cell, JSWeakMapType));
    }
19157
    // Overload that lowers `edge` to a cell via lowCell() first.
    void speculateWeakMapObject(Edge edge)
    {
        speculateWeakMapObject(edge, lowCell(edge));
    }
19162
    // Type check: cell's JSType must be JSWeakSetType; filters to SpecWeakSetObject.
    void speculateWeakSetObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecWeakSetObject, isNotType(cell, JSWeakSetType));
    }
19168
    // Overload that lowers `edge` to a cell via lowCell() first.
    void speculateWeakSetObject(Edge edge)
    {
        speculateWeakSetObject(edge, lowCell(edge));
    }
19173
    // Type check: cell's JSType must be DataViewType; filters to SpecDataViewObject.
    void speculateDataViewObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecDataViewObject, isNotType(cell, DataViewType));
    }
19179
    // Overload that lowers `edge` to a cell via lowCell() first.
    void speculateDataViewObject(Edge edge)
    {
        speculateDataViewObject(edge, lowCell(edge));
    }
19184
    // Type check: cell must be a JSString; filters to SpecString.
    void speculateString(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecString, isNotString(cell));
    }
19189
    // Overload that lowers `edge` to a cell via lowCell() first.
    void speculateString(Edge edge)
    {
        speculateString(edge, lowCell(edge));
    }
19194
    // Type check: the value must be a string or "other" (null/undefined).
    // Same split-on-cell-ness structure as speculateObjectOrOther.
    void speculateStringOrOther(Edge edge, LValue value)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock notCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));

        LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);

        // Cell path: must be a JSString.
        FTL_TYPE_CHECK(jsValueValue(value), edge, (~SpecCellCheck) | SpecString, isNotString(value));

        m_out.jump(continuation);
        m_out.appendTo(notCellCase, continuation);

        // Non-cell path: must be null/undefined ("other").
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));

        m_out.jump(continuation);
        m_out.appendTo(continuation, lastNext);
    }
19218
    // Overload that lowers `edge` to a JSValue first (speculation is done here).
    void speculateStringOrOther(Edge edge)
    {
        speculateStringOrOther(edge, lowJSValue(edge, ManualOperandSpeculation));
    }
19223
    // Type check: the string must be an identifier, i.e. a non-rope string
    // whose StringImpl has the atom flag set. `stringImpl` is the already
    // loaded JSString_value pointer for `string`.
    void speculateStringIdent(Edge edge, LValue string, LValue stringImpl)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecStringIdent | ~SpecString))
            return;

        // Ropes have no StringImpl, so they can never be identifiers.
        speculate(BadType, jsValueValue(string), edge.node(), isRopeString(string));
        // The impl must be atomized.
        speculate(
            BadType, jsValueValue(string), edge.node(),
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIsAtom())));
        m_interpreter.filter(edge, SpecStringIdent | ~SpecString);
    }
19237
    // Overload: lowStringIdent() performs the lowering and the speculation.
    void speculateStringIdent(Edge edge)
    {
        lowStringIdent(edge);
    }
19242
    // Type check: the edge must be a StringObject; delegates to the cell
    // variant after lowering.
    void speculateStringObject(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecStringObject))
            return;

        speculateStringObjectForCell(edge, lowCell(edge));
    }
19250
    // Type check: the cell must be either a JSString or a StringObject.
    // Branches past the check when the type byte is StringType; otherwise
    // requires StringObjectType.
    void speculateStringOrStringObject(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecString | SpecStringObject))
            return;

        LValue cellBase = lowCell(edge);
        // Re-check: lowCell() can refine the abstract state — presumably making
        // the check unnecessary (NOTE(review): confirm against lowCell).
        if (!m_interpreter.needsTypeCheck(edge, SpecString | SpecStringObject))
            return;

        LBasicBlock notString = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue type = m_out.load8ZeroExt32(cellBase, m_heaps.JSCell_typeInfoType);
        m_out.branch(
            m_out.equal(type, m_out.constInt32(StringType)),
            unsure(continuation), unsure(notString));

        LBasicBlock lastNext = m_out.appendTo(notString, continuation);
        speculate(
            BadType, jsValueValue(cellBase), edge.node(),
            m_out.notEqual(type, m_out.constInt32(StringObjectType)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        m_interpreter.filter(edge, SpecString | SpecStringObject);
    }
19277
    // Type check: the cell's JSType must be StringObjectType; filters to
    // SpecStringObject.
    void speculateStringObjectForCell(Edge edge, LValue cell)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecStringObject))
            return;

        LValue type = m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType);
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecStringObject, m_out.notEqual(type, m_out.constInt32(StringObjectType)));
    }
19286
    // Type check: cell must be a Symbol; filters to SpecSymbol.
    void speculateSymbol(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecSymbol, isNotSymbol(cell));
    }
19291
    // Overload that lowers `edge` to a cell via lowCell() first.
    void speculateSymbol(Edge edge)
    {
        speculateSymbol(edge, lowCell(edge));
    }
19296
    // Type check: cell must be a heap-allocated BigInt; filters to SpecHeapBigInt.
    void speculateHeapBigInt(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecHeapBigInt, isNotHeapBigInt(cell));
    }
    // Overload that lowers `edge` to a cell via lowCell() first.
    void speculateHeapBigInt(Edge edge)
    {
        speculateHeapBigInt(edge, lowCell(edge));
    }
19305
19306#if USE(BIGINT32)
    // Type check (BIGINT32 builds only): value must be an inline BigInt32;
    // filters to SpecBigInt32.
    void speculateBigInt32(Edge edge)
    {
        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecBigInt32, isNotBigInt32(value));
    }
19312
    // Type check (BIGINT32 builds only): value must be some BigInt (inline or
    // heap); filters to SpecBigInt.
    void speculateAnyBigInt(Edge edge)
    {
        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecBigInt, isNotAnyBigInt(value));
    }
19318#endif
19319
    // Type check: cell must be an object, and — unless the masquerades-as-
    // undefined watchpoint still holds — additionally must not have the
    // MasqueradesAsUndefined type-info flag.
    void speculateNonNullObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecObject, isNotObject(cell));
        if (masqueradesAsUndefinedWatchpointIsStillValid())
            return;

        speculate(
            BadType, jsValueValue(cell), edge.node(),
            m_out.testNonZero32(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
                m_out.constInt32(MasqueradesAsUndefined)));
    }
19332
    // Type check: value must be a bytecode number (int32 or double); filters
    // to SpecBytecodeNumber.
    void speculateNumber(Edge edge)
    {
        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecBytecodeNumber, isNotNumber(value));
    }
19338
    // Type check: value must be a real (non-NaN) number. The unboxed double
    // compares equal to itself for any non-NaN; the rare NaN-pattern path must
    // then be a boxed int32 to pass.
    void speculateRealNumber(Edge edge)
    {
        // Do an early return here because lowDouble() can create a lot of control flow.
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        LValue doubleValue = unboxDouble(value);

        LBasicBlock intCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // doubleEqual(x, x) is false only for NaN bit patterns.
        m_out.branch(
            m_out.doubleEqual(doubleValue, doubleValue),
            usually(continuation), rarely(intCase));

        LBasicBlock lastNext = m_out.appendTo(intCase, continuation);

        // NOTE: uses m_node->child1() rather than `edge` here.
        typeCheck(
            jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
            isNotInt32(value, provenType(m_node->child1()) & ~SpecFullDouble));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
19364
    // Type check: the double-rep value must be a real (non-NaN) double;
    // doubleNotEqualOrUnordered(x, x) is true exactly for NaN.
    void speculateDoubleRepReal(Edge edge)
    {
        // Do an early return here because lowDouble() can create a lot of control flow.
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowDouble(edge);
        FTL_TYPE_CHECK(
            doubleValue(value), edge, SpecDoubleReal,
            m_out.doubleNotEqualOrUnordered(value, value));
    }
19376
    // Type check: the double-rep value must convert exactly to a strict int52;
    // doubleToStrictInt52() emits the conversion-and-check.
    void speculateDoubleRepAnyInt(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        doubleToStrictInt52(edge, lowDouble(edge));
    }
19384
    // Type check: lowBoolean() performs the lowering and the speculation.
    void speculateBoolean(Edge edge)
    {
        lowBoolean(edge);
    }
19389
    // Type check: value must not be a "string variable" — i.e. if it is a
    // string at all, it must be an identifier (atomized) string. Non-cells and
    // non-string cells pass straight through.
    void speculateNotStringVar(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, ~SpecStringVar))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock isStringCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(edge)), unsure(isCellCase), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
        m_out.branch(isString(value, provenType(edge)), unsure(isStringCase), unsure(continuation));

        // It is a string: require it to be an identifier.
        m_out.appendTo(isStringCase, continuation);
        speculateStringIdent(edge, value, m_out.loadPtr(value, m_heaps.JSString_value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
19412
    // Type check: value must not be a Symbol. Only the cell path needs a
    // structure check; non-cells trivially pass.
    void speculateNotSymbol(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, ~SpecSymbol))
            return;

        ASSERT(mayHaveTypeCheck(edge.useKind()));
        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(edge)), unsure(isCellCase), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
        speculate(BadType, jsValueValue(value), edge.node(), isSymbol(value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        m_interpreter.filter(edge, ~SpecSymbol);
    }
19434
    // Type check: value must be "other" (null/undefined); filters to SpecOther.
    void speculateOther(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        typeCheck(jsValueValue(value), edge, SpecOther, isNotOther(value));
    }
19443
    // Type check: value must be "misc" (boolean/other immediates); filters to
    // SpecMisc.
    void speculateMisc(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        typeCheck(jsValueValue(value), edge, SpecMisc, isNotMisc(value));
    }
19452
    // Speculates that the typed-array view `base` has not been detached.
    // Only WastefulTypedArray views can be detached; for those, a null (after
    // untagging) vector pointer means detached and triggers an OSR exit.
    void speculateTypedArrayIsNotDetached(LValue base)
    {
        LBasicBlock isWasteful = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue mode = m_out.load32(base, m_heaps.JSArrayBufferView_mode);
        m_out.branch(m_out.equal(mode, m_out.constInt32(WastefulTypedArray)),
            unsure(isWasteful), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isWasteful, continuation);
        LValue vector = m_out.loadPtr(base, m_heaps.JSArrayBufferView_vector);
        // FIXME: We could probably make this a mask.
        // https://bugs.webkit.org/show_bug.cgi?id=197701
        vector = removeArrayPtrTag(vector);
        speculate(Uncountable, jsValueValue(vector), m_node, m_out.isZero64(vector));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
19472
    // Queries the graph whether the masquerades-as-undefined watchpoint is
    // still intact at the current semantic code origin.
    bool masqueradesAsUndefinedWatchpointIsStillValid()
    {
        return m_graph.masqueradesAsUndefinedWatchpointIsStillValid(m_origin.semantic);
    }
19477
    // Loads the GC cell-state byte of `base`, zero-extended to 32 bits.
    LValue loadCellState(LValue base)
    {
        return m_out.load8ZeroExt32(base, m_heaps.JSCell_cellState);
    }
19482
    // Emits a GC store barrier for `base`. Fast path: skip when the cell state
    // is above the barrier threshold. Fenced variant loads the threshold from
    // the heap and re-checks the cell state after a fence before taking the
    // slow path. Slow path calls operationWriteBarrierSlowPath.
    void emitStoreBarrier(LValue base, bool isFenced)
    {
        LBasicBlock recheckPath = nullptr;
        if (isFenced)
            recheckPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(isFenced ? recheckPath : slowPath);

        // Fenced: dynamic threshold from the heap; unfenced: constant blackThreshold.
        LValue threshold;
        if (isFenced)
            threshold = m_out.load32(m_out.absolute(vm().heap.addressOfBarrierThreshold()));
        else
            threshold = m_out.constInt32(blackThreshold);

        m_out.branch(
            m_out.above(loadCellState(base), threshold),
            usually(continuation), rarely(isFenced ? recheckPath : slowPath));

        if (isFenced) {
            m_out.appendTo(recheckPath, slowPath);

            // Re-load the cell state after the fence before committing to the
            // slow path.
            m_out.fence(&m_heaps.root, &m_heaps.JSCell_cellState);

            m_out.branch(
                m_out.above(loadCellState(base), m_out.constInt32(blackThreshold)),
                usually(continuation), rarely(slowPath));
        }

        m_out.appendTo(slowPath, continuation);

        LValue call = vmCall(Void, operationWriteBarrierSlowPath, m_vmValue, base);
        // Tell the alias analysis what the slow call reads and writes.
        m_heaps.decorateCCallRead(&m_heaps.root, call);
        m_heaps.decorateCCallWrite(&m_heaps.JSCell_cellState, call);

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
19523
    // Emits a mutator fence. On x86 the fence is emitted unconditionally; on
    // other architectures it is emitted only when the heap's
    // mutatorShouldBeFenced flag is set at runtime.
    void mutatorFence()
    {
        if (isX86()) {
            m_out.fence(&m_heaps.root, nullptr);
            return;
        }

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        m_out.branch(
            m_out.load8ZeroExt32(m_out.absolute(vm().heap.addressOfMutatorShouldBeFenced())),
            rarely(slowPath), usually(continuation));

        m_out.appendTo(slowPath, continuation);

        m_out.fence(&m_heaps.root, nullptr);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
19547
    // Installs `butterfly` on `object` with the fencing protocol: nuke the
    // structure ID (set nukedStructureIDBit), fence, store the butterfly,
    // fence. On x86 this is done unconditionally; elsewhere the fenced
    // sequence runs only when mutatorShouldBeFenced is set, and the fast path
    // just stores the butterfly.
    void nukeStructureAndSetButterfly(LValue butterfly, LValue object)
    {
        if (isX86()) {
            m_out.store32(
                m_out.bitOr(
                    m_out.load32(object, m_heaps.JSCell_structureID),
                    m_out.constInt32(nukedStructureIDBit())),
                object, m_heaps.JSCell_structureID);
            m_out.fence(&m_heaps.root, nullptr);
            m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
            m_out.fence(&m_heaps.root, nullptr);
            return;
        }

        LBasicBlock fastPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(fastPath);

        m_out.branch(
            m_out.load8ZeroExt32(m_out.absolute(vm().heap.addressOfMutatorShouldBeFenced())),
            rarely(slowPath), usually(fastPath));

        m_out.appendTo(fastPath, slowPath);

        // No fencing needed: plain butterfly store.
        m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        // Fenced protocol: nuke structure, fence, store butterfly, fence.
        m_out.store32(
            m_out.bitOr(
                m_out.load32(object, m_heaps.JSCell_structureID),
                m_out.constInt32(nukedStructureIDBit())),
            object, m_heaps.JSCell_structureID);
        m_out.fence(&m_heaps.root, nullptr);
        m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
        m_out.fence(&m_heaps.root, nullptr);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
19591
    // Emits a call to a VM operation: does callPreflight() (publish call site
    // index / top call frame), the call itself, then either a full exception
    // check (if the node may exit) or — in debug builds — an assertion that no
    // exception was thrown. The arity static_assert catches argument-count
    // mismatches at compile time.
    template<typename OperationType, typename... Args>
    LValue vmCall(LType type, OperationType function, Args&&... args)
    {
        static_assert(!std::is_same<OperationType, LValue>::value);
        if constexpr (!std::is_same_v<FunctionPtr<OperationPtrTag>, OperationType>)
            static_assert(FunctionTraits<OperationType>::cCallArity() == sizeof...(Args), "Sanity check");
        callPreflight();
        LValue result = m_out.call(type, m_out.operation(function), std::forward<Args>(args)...);
        if (mayExit(m_graph, m_node))
            callCheck();
        else {
            // We can't exit due to an exception, so we also can't throw an exception.
#ifndef NDEBUG
            LBasicBlock crash = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue exception = m_out.load64(m_out.absolute(vm().addressOfException()));
            LValue hadException = m_out.notZero64(exception);

            m_out.branch(
                hadException, rarely(crash), usually(continuation));

            LBasicBlock lastNext = m_out.appendTo(crash, continuation);
            m_out.unreachable();

            m_out.appendTo(continuation, lastNext);
#endif
        }
        return result;
    }
19622
    // Prepares the frame for a call into the VM: registers `codeOrigin` as a
    // call site and stores its index into the argument-count tag slot, and
    // (where needed) publishes the call frame to vm().topCallFrame.
    void callPreflight(CodeOrigin codeOrigin)
    {
        CallSiteIndex callSiteIndex = m_ftlState.jitCode->common.codeOrigins->addCodeOrigin(codeOrigin);
        m_out.store32(
            m_out.constInt32(callSiteIndex.bits()),
            tagFor(VirtualRegister(CallFrameSlot::argumentCountIncludingThis)));
#if !USE(BUILTIN_FRAME_ADDRESS) || ASSERT_ENABLED
        m_out.storePtr(m_callFrame, m_out.absolute(&vm().topCallFrame));
#endif
    }
19633
    // Overload using the current node's call-site code origin.
    void callPreflight()
    {
        callPreflight(codeOriginDescriptionOfCallSite());
    }
19638
    // Returns the code origin to attribute to the current call site. For
    // inlined tail calls the origin is hoisted to the caller (skipping tail
    // calls) so the resulting frame looks like the logical caller's.
    CodeOrigin codeOriginDescriptionOfCallSite() const
    {
        CodeOrigin codeOrigin = m_origin.semantic;
        if (m_node->op() == TailCallInlinedCaller
            || m_node->op() == TailCallVarargsInlinedCaller
            || m_node->op() == TailCallForwardVarargsInlinedCaller
            || m_node->op() == DirectTailCallInlinedCaller) {
            // This case arises when you have a situation like this:
            // foo makes a call to bar, bar is inlined in foo. bar makes a call
            // to baz and baz is inlined in bar. And then baz makes a tail-call to jaz,
            // and jaz is inlined in baz. We want the callframe for jaz to appear to
            // have caller be bar.
            codeOrigin = *codeOrigin.inlineCallFrame()->getCallerSkippingTailCalls();
        }

        return codeOrigin;
    }
19656
    // Emits the post-call exception check. Optionally invokes the exception
    // fuzzer first. If this machine frame will catch the exception, appends an
    // ExceptionCheck OSR exit targeting the catch handler's origin; otherwise
    // branches to the shared m_handleExceptions block.
    void callCheck()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_origin.semantic);
        if (Options::useExceptionFuzz()) {
#if !USE(BUILTIN_FRAME_ADDRESS) || ASSERT_ENABLED
            m_out.storePtr(m_callFrame, m_out.absolute(&vm().topCallFrame));
#endif
            m_out.call(Void, m_out.operation(operationExceptionFuzz), weakPointer(globalObject));
        }

        LValue exception = m_out.load64(m_out.absolute(vm().addressOfException()));
        LValue hadException = m_out.notZero64(exception);

        CodeOrigin opCatchOrigin;
        HandlerInfo* exceptionHandler;
        if (m_graph.willCatchExceptionInMachineFrame(m_origin.forExit, opCatchOrigin, exceptionHandler)) {
            bool exitOK = true;
            bool isExceptionHandler = true;
            appendOSRExit(
                ExceptionCheck, noValue(), nullptr, hadException,
                m_origin.withForExitAndExitOK(opCatchOrigin, exitOK), isExceptionHandler);
            return;
        }

        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            hadException, rarely(m_handleExceptions), usually(continuation));

        m_out.appendTo(continuation);
    }
19688
    // Sets up exception handling for a patchpoint. If no catch handler exists
    // in this machine frame, returns the default handle. Otherwise appends an
    // OSR exit descriptor, attaches its exit arguments to the patchpoint's
    // stackmap, and returns a handle that knows where those arguments start.
    RefPtr<PatchpointExceptionHandle> preparePatchpointForExceptions(PatchpointValue* value)
    {
        CodeOrigin opCatchOrigin;
        HandlerInfo* exceptionHandler;
        bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_origin.forExit, opCatchOrigin, exceptionHandler);
        if (!willCatchException)
            return PatchpointExceptionHandle::defaultHandle(m_ftlState, m_nodeIndexInGraph);

        dataLogLnIf(verboseCompilationEnabled(), "    Patchpoint exception OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap());

        bool exitOK = true;
        NodeOrigin origin = m_origin.withForExitAndExitOK(opCatchOrigin, exitOK);

        OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(noValue(), nullptr);

        // Compute the offset into the StackmapGenerationParams where we will find the exit arguments
        // we are about to append. We need to account for both the children we've already added, and
        // for the possibility of a result value if the patchpoint is not void.
        unsigned offset = value->numChildren();
        if (value->type() != Void)
            offset++;

        // Use LateColdAny to ensure that the stackmap arguments interfere with the patchpoint's
        // result and with any late-clobbered registers.
        value->appendVectorWithRep(
            buildExitArguments(exitDescriptor, opCatchOrigin, noValue()),
            ValueRep::LateColdAny);

        return PatchpointExceptionHandle::create(
            m_ftlState, exitDescriptor, origin, m_nodeIndexInGraph, offset, *exceptionHandler);
    }
19720
19721 LBasicBlock lowBlock(DFG::BasicBlock* block)
19722 {
19723 return m_blocks.get(block);
19724 }
19725
19726 OSRExitDescriptor* appendOSRExitDescriptor(FormattedValue lowValue, Node* highValue)
19727 {
19728 return appendOSRExitDescriptor(lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue));
19729 }
19730
19731 OSRExitDescriptor* appendOSRExitDescriptor(FormattedValue lowValue, const MethodOfGettingAValueProfile& profile)
19732 {
19733 return &m_ftlState.jitCode->osrExitDescriptors.alloc(
19734 lowValue.format(), profile,
19735 availabilityMap().m_locals.numberOfArguments(),
19736 availabilityMap().m_locals.numberOfLocals(),
19737 availabilityMap().m_locals.numberOfTmps());
19738 }
19739
19740 void appendOSRExit(
19741 ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition,
19742 NodeOrigin origin, bool isExceptionHandler = false)
19743 {
19744 return appendOSRExit(kind, lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue),
19745 failCondition, origin, isExceptionHandler);
19746 }
19747
    // Emits a speculation check: if failCondition is true at runtime, we OSR exit with
    // the given kind at the given origin. lowValue/profile describe the value to report
    // for profiling on the exit path. isExceptionHandler exempts this exit from fuzzing.
    void appendOSRExit(
        ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, LValue failCondition,
        NodeOrigin origin, bool isExceptionHandler = false)
    {
        dataLogLnIf(verboseCompilationEnabled(), " OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap());

        DFG_ASSERT(m_graph, m_node, origin.exitOK);

        // OSR exit fuzzing: deliberately force exits to fire (at or after a configured
        // check count) to stress-test the exit machinery. Exception-handler exits are
        // excluded because they must only fire on a real exception.
        if (!isExceptionHandler
            && Options::useOSRExitFuzz()
            && canUseOSRExitFuzzing(m_graph.baselineCodeBlockFor(m_origin.semantic))
            && doOSRExitFuzzing()) {
            // Count this dynamic check in the global fuzz-check counter.
            LValue numberOfFuzzChecks = m_out.add(
                m_out.load32(m_out.absolute(&g_numberOfOSRExitFuzzChecks)),
                m_out.int32One);

            m_out.store32(numberOfFuzzChecks, m_out.absolute(&g_numberOfOSRExitFuzzChecks));

            // Widen the fail condition so the exit also fires when the fuzz
            // thresholds are crossed.
            if (unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter()) {
                failCondition = m_out.bitOr(
                    failCondition,
                    m_out.aboveOrEqual(numberOfFuzzChecks, m_out.constInt32(atOrAfter)));
            }
            if (unsigned at = Options::fireOSRExitFuzzAt()) {
                failCondition = m_out.bitOr(
                    failCondition,
                    m_out.equal(numberOfFuzzChecks, m_out.constInt32(at)));
            }
        }

        // A statically-false condition means the exit can never fire; emit nothing.
        if (failCondition == m_out.booleanFalse)
            return;

        blessSpeculation(
            m_out.speculate(failCondition), kind, lowValue, profile, origin);
    }
19784
19785 void blessSpeculation(CheckValue* value, ExitKind kind, FormattedValue lowValue, Node* highValue, NodeOrigin origin)
19786 {
19787 blessSpeculation(value, kind, lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue), origin);
19788 }
19789
    // Attaches OSR exit metadata to an already-created speculation CheckValue: allocates
    // an exit descriptor, appends the exit-time values as cold stackmap arguments, and
    // installs a generator that emits the actual exit code when B3 finalizes.
    void blessSpeculation(CheckValue* value, ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, NodeOrigin origin)
    {
        OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(lowValue, profile);

        value->appendColdAnys(buildExitArguments(exitDescriptor, origin.forExit, lowValue));

        // Capture what the generator needs by value: the lambda runs later, during B3
        // code generation, so it must not reference `this`'s transient lowering state.
        State* state = &m_ftlState;
        auto nodeIndex = m_nodeIndexInGraph;
        value->setGenerator(
            [=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
                exitDescriptor->emitOSRExit(
                    *state, kind, origin, jit, params, nodeIndex, 0);
            });
    }
19804
19805 StackmapArgumentList buildExitArguments(
19806 OSRExitDescriptor* exitDescriptor, CodeOrigin exitOrigin, FormattedValue lowValue,
19807 unsigned offsetOfExitArgumentsInStackmapLocations = 0)
19808 {
19809 StackmapArgumentList result;
19810 buildExitArguments(
19811 exitDescriptor, exitOrigin, result, lowValue, offsetOfExitArgumentsInStackmapLocations);
19812 return result;
19813 }
19814
    // Populates the exit descriptor's per-operand ExitValues and the stackmap argument
    // list that the OSR exit will consume. Prunes availability to what is live at the
    // exit origin, creates materializations for phantom allocations, then records how
    // to recover every operand (and every heap location of a phantom allocation).
    void buildExitArguments(
        OSRExitDescriptor* exitDescriptor, CodeOrigin exitOrigin, StackmapArgumentList& arguments, FormattedValue lowValue,
        unsigned offsetOfExitArgumentsInStackmapLocations = 0)
    {
        // The profiled value, if any, always occupies the first stackmap slot.
        if (!!lowValue)
            arguments.append(lowValue.value());

        // Work on a copy: pruning is specific to this exit origin.
        AvailabilityMap availabilityMap = this->availabilityMap();
        availabilityMap.pruneByLiveness(m_graph, exitOrigin);

        // One ExitTimeObjectMaterialization per live phantom allocation node.
        HashMap<Node*, ExitTimeObjectMaterialization*> map;
        availabilityMap.forEachAvailability(
            [&] (Availability availability) {
                if (!availability.shouldUseNode())
                    return;

                Node* node = availability.node();
                if (!node->isPhantomAllocation())
                    return;

                auto result = map.add(node, nullptr);
                if (result.isNewEntry) {
                    result.iterator->value =
                        exitDescriptor->m_materializations.add(node->op(), node->origin.semantic);
                }
            });

        // Record an ExitValue for every operand tracked by the descriptor.
        for (unsigned i = 0; i < exitDescriptor->m_values.size(); ++i) {
            Operand operand = exitDescriptor->m_values.operandForIndex(i);

            Availability availability = availabilityMap.m_locals[i];

            // FIXME: It seems like we should be able to do at least some validation when OSR entering. https://bugs.webkit.org/show_bug.cgi?id=215511
            if (Options::validateFTLOSRExitLiveness()
                && m_graph.m_plan.mode() != JITCompilationMode::FTLForOSREntry) {

                if (availability.isDead() && m_graph.isLiveInBytecode(operand, exitOrigin))
                    DFG_CRASH(m_graph, m_node, toCString("Live bytecode local not available: operand = ", operand, ", availability = ", availability, ", origin = ", exitOrigin).data());
            }
            ExitValue exitValue = exitValueForAvailability(arguments, map, availability);
            if (exitValue.hasIndexInStackmapLocations())
                exitValue.adjustStackmapLocationsIndexByOffset(offsetOfExitArgumentsInStackmapLocations);
            exitDescriptor->m_values[i] = exitValue;
        }

        // Record how to recover each heap field of each phantom allocation.
        for (auto heapPair : availabilityMap.m_heap) {
            Node* node = heapPair.key.base();
            ExitTimeObjectMaterialization* materialization = map.get(node);
            if (!materialization)
                DFG_CRASH(m_graph, m_node, toCString("Could not find materialization for ", node, " in ", availabilityMap).data());
            ExitValue exitValue = exitValueForAvailability(arguments, map, heapPair.value);
            if (exitValue.hasIndexInStackmapLocations())
                exitValue.adjustStackmapLocationsIndexByOffset(offsetOfExitArgumentsInStackmapLocations);
            materialization->add(
                heapPair.key.descriptor(),
                exitValue);
        }

        if (verboseCompilationEnabled()) {
            dataLog(" Exit values: ", exitDescriptor->m_values, "\n");
            if (!exitDescriptor->m_materializations.isEmpty()) {
                dataLog(" Materializations: \n");
                for (ExitTimeObjectMaterialization* materialization : exitDescriptor->m_materializations)
                    dataLog(" ", pointerDump(materialization), "\n");
            }
        }
    }
19882
    // Converts an Availability into an ExitValue: prefer the flushed stack slot (with the
    // format the value was flushed in); otherwise fall back to the node's lowered value,
    // or report the value as dead.
    ExitValue exitValueForAvailability(
        StackmapArgumentList& arguments, const HashMap<Node*, ExitTimeObjectMaterialization*>& map,
        Availability availability)
    {
        FlushedAt flush = availability.flushedAt();
        switch (flush.format()) {
        case DeadFlush:
        case ConflictingFlush:
            // No usable stack slot; recover from the node's lowered value if we have one.
            if (availability.hasNode())
                return exitValueForNode(arguments, map, availability.node());

            // This means that the value is dead. It could be dead in bytecode or it could have
            // been killed by our DCE, which can sometimes kill things even if they were live in
            // bytecode.
            return ExitValue::dead();

        case FlushedJSValue:
        case FlushedCell:
        case FlushedBoolean:
            return ExitValue::inJSStack(flush.virtualRegister());

        case FlushedInt32:
            return ExitValue::inJSStackAsInt32(flush.virtualRegister());

        case FlushedInt52:
            return ExitValue::inJSStackAsInt52(flush.virtualRegister());

        case FlushedDouble:
            return ExitValue::inJSStackAsDouble(flush.virtualRegister());
        }

        DFG_CRASH(m_graph, m_node, "Invalid flush format");
        return ExitValue::dead();
    }
19917
19918 ExitValue exitValueForNode(
19919 StackmapArgumentList& arguments, const HashMap<Node*, ExitTimeObjectMaterialization*>& map,
19920 Node* node)
19921 {
19922 // NOTE: In FTL->B3, we cannot generate code here, because m_output is positioned after the
19923 // stackmap value. Like all values, the stackmap value cannot use a child that is defined after
19924 // it.
19925
19926 ASSERT(node->shouldGenerate());
19927 ASSERT(node->hasResult());
19928
19929 if (node) {
19930 switch (node->op()) {
19931 case BottomValue:
19932 // This might arise in object materializations. I actually doubt that it would,
19933 // but it seems worthwhile to be conservative.
19934 return ExitValue::dead();
19935
19936 case JSConstant:
19937 case Int52Constant:
19938 case DoubleConstant:
19939 return ExitValue::constant(node->asJSValue());
19940
19941 default:
19942 if (node->isPhantomAllocation())
19943 return ExitValue::materializeNewObject(map.get(node));
19944 break;
19945 }
19946 }
19947
19948 LoweredNodeValue value = m_int32Values.get(node);
19949 if (isValid(value))
19950 return exitArgument(arguments, DataFormatInt32, value.value());
19951
19952 value = m_int52Values.get(node);
19953 if (isValid(value))
19954 return exitArgument(arguments, DataFormatInt52, value.value());
19955
19956 value = m_strictInt52Values.get(node);
19957 if (isValid(value))
19958 return exitArgument(arguments, DataFormatStrictInt52, value.value());
19959
19960 value = m_booleanValues.get(node);
19961 if (isValid(value))
19962 return exitArgument(arguments, DataFormatBoolean, value.value());
19963
19964 value = m_jsValueValues.get(node);
19965 if (isValid(value))
19966 return exitArgument(arguments, DataFormatJS, value.value());
19967
19968 value = m_doubleValues.get(node);
19969 if (isValid(value))
19970 return exitArgument(arguments, DataFormatDouble, value.value());
19971
19972 DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data());
19973 return ExitValue::dead();
19974 }
19975
19976 ExitValue exitArgument(StackmapArgumentList& arguments, DataFormat format, LValue value)
19977 {
19978 ExitValue result = ExitValue::exitArgument(ExitArgument(format, arguments.size()));
19979 arguments.append(value);
19980 return result;
19981 }
19982
    // Like exitValueForNode, but restricted to the representations a tail call can
    // consume: constants, JSValue, and boxable int32/boolean. Doubles and Int52 are
    // expected to have been converted before this point.
    ExitValue exitValueForTailCall(StackmapArgumentList& arguments, Node* node)
    {
        ASSERT(node->shouldGenerate());
        ASSERT(node->hasResult());

        switch (node->op()) {
        case JSConstant:
        case Int52Constant:
        case DoubleConstant:
            return ExitValue::constant(node->asJSValue());

        default:
            break;
        }

        // Prefer an already-boxed JSValue; otherwise box int32/boolean representations.
        LoweredNodeValue value = m_jsValueValues.get(node);
        if (isValid(value))
            return exitArgument(arguments, DataFormatJS, value.value());

        value = m_int32Values.get(node);
        if (isValid(value))
            return exitArgument(arguments, DataFormatJS, boxInt32(value.value()));

        value = m_booleanValues.get(node);
        if (isValid(value))
            return exitArgument(arguments, DataFormatJS, boxBoolean(value.value()));

        // Doubles and Int52 have been converted by ValueRep()
        DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data());
    }
20013
    // The set*(Node*, LValue) helpers record a node's lowered B3 value in the map for
    // the given representation, tagged with the block (m_highBlock) that produced it.
    void setInt32(Node* node, LValue value)
    {
        m_int32Values.set(node, LoweredNodeValue(value, m_highBlock));
    }
    void setInt52(Node* node, LValue value)
    {
        m_int52Values.set(node, LoweredNodeValue(value, m_highBlock));
    }
    void setStrictInt52(Node* node, LValue value)
    {
        m_strictInt52Values.set(node, LoweredNodeValue(value, m_highBlock));
    }
    // Dispatches to the correct int52 map based on the kind (shifted vs. strict).
    void setInt52(Node* node, LValue value, Int52Kind kind)
    {
        switch (kind) {
        case Int52:
            setInt52(node, value);
            return;

        case StrictInt52:
            setStrictInt52(node, value);
            return;
        }

        DFG_CRASH(m_graph, m_node, "Corrupt int52 kind");
    }
    void setJSValue(Node* node, LValue value)
    {
        m_jsValueValues.set(node, LoweredNodeValue(value, m_highBlock));
    }
    void setBoolean(Node* node, LValue value)
    {
        m_booleanValues.set(node, LoweredNodeValue(value, m_highBlock));
    }
    void setStorage(Node* node, LValue value)
    {
        m_storageValues.set(node, LoweredNodeValue(value, m_highBlock));
    }
    void setDouble(Node* node, LValue value)
    {
        m_doubleValues.set(node, LoweredNodeValue(value, m_highBlock));
    }
20056
    // The set*(LValue) helpers record the lowered value for the node currently being
    // lowered (m_node).
    void setInt32(LValue value)
    {
        setInt32(m_node, value);
    }
    void setInt52(LValue value)
    {
        setInt52(m_node, value);
    }
    void setStrictInt52(LValue value)
    {
        setStrictInt52(m_node, value);
    }
    void setInt52(LValue value, Int52Kind kind)
    {
        setInt52(m_node, value, kind);
    }
    void setJSValue(LValue value)
    {
        setJSValue(m_node, value);
    }
    void setBoolean(LValue value)
    {
        setBoolean(m_node, value);
    }
    void setStorage(LValue value)
    {
        setStorage(m_node, value);
    }
    void setDouble(LValue value)
    {
        setDouble(m_node, value);
    }
20089
20090 bool isValid(const LoweredNodeValue& value)
20091 {
20092 if (!value)
20093 return false;
20094 if (!m_graph.m_ssaDominators->dominates(value.block(), m_highBlock))
20095 return false;
20096 return true;
20097 }
20098
    // Emits a no-op patchpoint that keeps `value` observable to B3 at this point, so
    // the optimizer cannot kill it earlier. The effects (writesLocalState + reads top)
    // pin the patchpoint in place without claiming it writes the heap.
    void ensureStillAliveHere(LValue value)
    {
        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        patchpoint->effects = Effects::none();
        patchpoint->effects.writesLocalState = true;
        patchpoint->effects.reads = HeapRange::top();
        patchpoint->append(value, ValueRep::ColdAny);
        // Generates no code; the patchpoint exists purely for its liveness effect.
        patchpoint->setGenerator([=] (CCallHelpers&, const StackmapGenerationParams&) { });
    }
20108
20109 LValue toButterfly(LValue immutableButterfly)
20110 {
20111 return m_out.addPtr(immutableButterfly, JSImmutableButterfly::offsetOfData());
20112 }
20113
20114 LValue toIntegerOrInfinity(LValue doubleValue)
20115 {
20116 // https://tc39.es/ecma262/#sec-tointegerorinfinity
20117 // 1. If value is either of +0, -0, or NaN, return +0
20118 // 2. Otherwise, return trunc(value)
20119 return m_out.select(m_out.doubleNotEqualAndOrdered(doubleValue, m_out.doubleZero), m_out.doubleTrunc(doubleValue), m_out.doubleZero);
20120 }
20121
20122 void addWeakReference(JSCell* target)
20123 {
20124 m_graph.m_plan.weakReferences().addLazily(target);
20125 }
20126
    // Loads the Structure* for a cell by decoding its 32-bit structure ID through the
    // StructureIDTable: the high bits index the table and the entropy bits, xor'ed into
    // the stored (encoded) pointer, recover the real Structure pointer.
    LValue loadStructure(LValue value)
    {
        LValue structureID = m_out.load32(value, m_heaps.JSCell_structureID);
        LValue tableBase = m_out.loadPtr(m_out.absolute(vm().heap.structureIDTable().base()));
        // Table index lives above the entropy bits of the ID.
        LValue tableIndex = m_out.aShr(structureID, m_out.constInt32(StructureIDTable::s_numberOfEntropyBits));
        // Entropy bits are shifted into position to xor against the encoded pointer.
        LValue entropyBits = m_out.shl(m_out.zeroExtPtr(structureID), m_out.constInt32(StructureIDTable::s_entropyBitsShiftForStructurePointer));
        TypedPointer address = m_out.baseIndex(m_heaps.structureTable, tableBase, m_out.zeroExtPtr(tableIndex));
        LValue encodedStructureBits = m_out.loadPtr(address);
        return m_out.bitXor(encodedStructureBits, entropyBits);
    }
20137
20138 LValue weakPointer(JSCell* pointer)
20139 {
20140 addWeakReference(pointer);
20141 return m_out.alreadyRegisteredWeakPointer(m_graph, pointer);
20142 }
20143
20144 LValue frozenPointer(FrozenValue* value)
20145 {
20146 return m_out.alreadyRegisteredFrozenPointer(value);
20147 }
20148
20149 LValue weakStructureID(RegisteredStructure structure)
20150 {
20151 return m_out.constInt32(structure->id());
20152 }
20153
20154 LValue weakStructure(RegisteredStructure structure)
20155 {
20156 ASSERT(!!structure.get());
20157 return m_out.alreadyRegisteredWeakPointer(m_graph, structure.get());
20158 }
20159
    // The addressFor/payloadFor/tagFor family computes TypedPointers into the stack
    // frame for a given operand. Locals resolve against m_captured; everything else
    // (arguments, header slots) against m_callFrame. payload/tag offsets select the
    // halves of a JSValue slot.
    TypedPointer addressFor(LValue base, Operand operand, ptrdiff_t offset = 0)
    {
        return m_out.address(base, m_heaps.variables[operand.virtualRegister().offset()], offset);
    }
    TypedPointer payloadFor(LValue base, Operand operand)
    {
        return addressFor(base, operand, PayloadOffset);
    }
    TypedPointer tagFor(LValue base, Operand operand)
    {
        return addressFor(base, operand, TagOffset);
    }
    TypedPointer addressFor(Operand operand, ptrdiff_t offset = 0)
    {
        return addressFor(operand.virtualRegister(), offset);
    }
    TypedPointer addressFor(VirtualRegister operand, ptrdiff_t offset = 0)
    {
        // Locals live relative to the captured-variables base; others off the call frame.
        if (operand.isLocal())
            return addressFor(m_captured, operand, offset);
        return addressFor(m_callFrame, operand, offset);
    }
    TypedPointer payloadFor(Operand operand)
    {
        return payloadFor(operand.virtualRegister());
    }
    TypedPointer payloadFor(VirtualRegister operand)
    {
        return addressFor(operand, PayloadOffset);
    }
    TypedPointer tagFor(Operand operand)
    {
        return tagFor(operand.virtualRegister());
    }
    TypedPointer tagFor(VirtualRegister operand)
    {
        return addressFor(operand, TagOffset);
    }
20198
    // Accessors into the abstract interpreter's state for the node currently being
    // lowered: the full AbstractValue, its proven speculated type, its proven constant
    // value (if any), and its proven structure set.
    AbstractValue abstractValue(Node* node)
    {
        return m_state.forNode(node);
    }
    AbstractValue abstractValue(Edge edge)
    {
        return abstractValue(edge.node());
    }

    SpeculatedType provenType(Node* node)
    {
        return abstractValue(node).m_type;
    }
    SpeculatedType provenType(Edge edge)
    {
        return provenType(edge.node());
    }

    JSValue provenValue(Node* node)
    {
        return abstractValue(node).m_value;
    }
    JSValue provenValue(Edge edge)
    {
        return provenValue(edge.node());
    }

    StructureAbstractValue abstractStructure(Node* node)
    {
        return abstractValue(node).m_structure;
    }
    StructureAbstractValue abstractStructure(Edge edge)
    {
        return abstractStructure(edge.node());
    }
20234
    // Emits code that deterministically aborts if reached, identifying the offending
    // DFG block/node. Used for paths that the lowering believes are unreachable.
    void crash()
    {
        crash(m_highBlock, m_node);
    }
    void crash(DFG::BasicBlock* block, Node* node)
    {
        BlockIndex blockIndex = block->index;
        unsigned nodeIndex = node ? node->index() : UINT_MAX;
#if !ASSERT_ENABLED
        // Release builds: inline a tiny patchpoint that loads identifying values into
        // registers (visible in the crash dump) and aborts.
        auto nodeOp = node ? node->op() : LastNodeType;
        m_out.patchpoint(Void)->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                jit.move(CCallHelpers::TrustedImm32(blockIndex), GPRInfo::regT0);
                jit.move(CCallHelpers::TrustedImm32(nodeIndex), GPRInfo::regT1);
                if (node)
                    jit.move(CCallHelpers::TrustedImm32(nodeOp), GPRInfo::regT2);
                jit.abortWithReason(FTLCrash);
            });
#else // ASSERT_ENABLED
        // Debug builds: call out to ftlUnreachable, which can produce richer diagnostics.
        m_out.call(
            Void,
            m_out.operation(ftlUnreachable),
            // We don't want the CodeBlock to have a weak pointer to itself because
            // that would cause it to always get collected.
            m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), m_out.constInt32(blockIndex),
            m_out.constInt32(nodeIndex));
#endif // ASSERT_ENABLED
        m_out.unreachable();
    }
20266
    // Current OSR availability state, maintained by the availability calculator.
    AvailabilityMap& availabilityMap() { return m_availabilityCalculator.m_availability; }

    VM& vm() { return m_graph.m_vm; }
    CodeBlock* codeBlock() { return m_graph.m_codeBlock; }

    Graph& m_graph;
    State& m_ftlState;
    AbstractHeapRepository m_heaps;
    Output m_out; // B3 IR emission interface.
    Procedure& m_proc;

    // Shared slow path that dispatches thrown exceptions to the right handler.
    LBasicBlock m_handleExceptions;
    // DFG block -> lowered B3 block (see lowBlock()).
    HashMap<DFG::BasicBlock*, LBasicBlock> m_blocks;

    LValue m_callFrame;
    LValue m_vmValue;
    LValue m_captured;
    LValue m_numberTag;
    LValue m_notCellMask;

    // Per-representation maps from DFG node to its lowered B3 value (see set*/isValid).
    HashMap<Node*, LoweredNodeValue> m_int32Values;
    HashMap<Node*, LoweredNodeValue> m_strictInt52Values;
    HashMap<Node*, LoweredNodeValue> m_int52Values;
    HashMap<Node*, LoweredNodeValue> m_jsValueValues;
    HashMap<Node*, LoweredNodeValue> m_booleanValues;
    HashMap<Node*, LoweredNodeValue> m_storageValues;
    HashMap<Node*, LoweredNodeValue> m_doubleValues;

    HashMap<Node*, LValue> m_phis;

    LocalOSRAvailabilityCalculator m_availabilityCalculator;

    // Abstract interpreter state used for proven types/values during lowering.
    InPlaceAbstractState m_state;
    AbstractInterpreter<InPlaceAbstractState> m_interpreter;
    // The DFG block/node currently being lowered, and lowering cursors.
    DFG::BasicBlock* m_highBlock;
    DFG::BasicBlock* m_nextHighBlock;
    LBasicBlock m_nextLowBlock;

    NodeOrigin m_origin;
    unsigned m_nodeIndexInGraph { 0 };
    Node* m_node;

    // These are used for validating AI state.
    HashMap<Node*, NodeSet> m_liveInToNode;
    HashMap<Node*, AbstractValue> m_aiCheckedNodes;
    String m_graphDump;
20313};
20314
20315} // anonymous namespace
20316
20317void lowerDFGToB3(State& state)
20318{
20319 LowerDFGToB3 lowering(state);
20320 lowering.lower();
20321}
20322
20323} } // namespace JSC::FTL
20324
20325#endif // ENABLE(FTL_JIT)
20326
20327